aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/apache/arrow/go
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/apache/arrow/go')
-rw-r--r--vendor/github.com/apache/arrow/go/v14/LICENSE.txt1791
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig21
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/.gitignore35
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock44
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml23
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/Makefile54
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array.go127
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/array.go185
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go323
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go375
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go126
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go263
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go153
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go30
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go124
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl61
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go369
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go842
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go910
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/data.go250
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go365
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go364
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go1953
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go315
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go20
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go520
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go244
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go23
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go372
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go123
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go261
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go113
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go263
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go953
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go205
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/list.go1688
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/map.go361
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/null.go218
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go1430
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl158
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go3664
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl447
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl276
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/record.go411
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/string.go521
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go491
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/table.go421
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go381
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/union.go1370
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/array/util.go523
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go92
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile62
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go109
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go41
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go27
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s373
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go27
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go27
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go27
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s501
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go747
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go217
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go33
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go32
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/compare.go153
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype.go404
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go98
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go67
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go173
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go819
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go977
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go33
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go206
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl45
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata66
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go611
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go693
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/doc.go48
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go219
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go30
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go41
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go30
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/errors.go28
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go165
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go24
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go28
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go32
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go21
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go32
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go37
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go406
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go57
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go74
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go89
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go50
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go73
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go45
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go71
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go45
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go107
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go108
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go135
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go47
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go47
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go71
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go188
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go76
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go67
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go67
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go162
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go80
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go48
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go75
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go50
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go53
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go92
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go133
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go48
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go214
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go55
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go50
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go159
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go45
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go181
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go200
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go175
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go179
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go291
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go53
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go163
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go83
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go94
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go201
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go123
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go101
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go45
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go57
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go47
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go135
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go162
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go751
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go394
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go199
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go242
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go1287
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go285
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go1004
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile66
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go27
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go145
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go108
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go23
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go23
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go221
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go25
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go29
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go22
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go47
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc71
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go108
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h39
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h52
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go21
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go115
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go33
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go33
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go31
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go41
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s85
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go23
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go31
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s43
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go23
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go31
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s84
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go37
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json15
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata135
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/record.go49
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/schema.go301
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/table.go193
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/tools.go25
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_string.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go28
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go63
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go58
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go62
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go148
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go585
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl83
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl61
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go59
-rw-r--r--vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go25
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go452
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go151
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go361
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go109
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go90
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go26
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go37
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata42
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go2833
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl349
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go443
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/json/json.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go51
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile80
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go212
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go30
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go33
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/math.go49
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go212
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go55
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go65
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go90
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s927
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go56
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s324
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go31
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go30
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go30
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go88
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s1044
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go407
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl34
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata34
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go325
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl75
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go96
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go473
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s3074
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go227
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go96
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl34
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go96
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go96
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl34
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl42
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go473
-rw-r--r--vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s3074
257 files changed, 63071 insertions, 0 deletions
diff --git a/vendor/github.com/apache/arrow/go/v14/LICENSE.txt b/vendor/github.com/apache/arrow/go/v14/LICENSE.txt
new file mode 100644
index 000000000..573103298
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/LICENSE.txt
@@ -0,0 +1,1791 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+--------------------------------------------------------------------------------
+
+src/arrow/util (some portions): Apache 2.0, and 3-clause BSD
+
+Some portions of this module are derived from code in the Chromium project,
+copyright (c) Google inc and (c) The Chromium Authors and licensed under the
+Apache 2.0 License or the under the 3-clause BSD license:
+
+ Copyright (c) 2013 The Chromium Authors. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from Daniel Lemire's FrameOfReference project.
+
+https://github.com/lemire/FrameOfReference/blob/6ccaf9e97160f9a3b299e23a8ef739e711ef0c71/src/bpacking.cpp
+
+Copyright: 2013 Daniel Lemire
+Home page: http://lemire.me/en/
+Project page: https://github.com/lemire/FrameOfReference
+License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from the TensorFlow project
+
+Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the NumPy project.
+
+https://github.com/numpy/numpy/blob/e1f191c46f2eebd6cb892a4bfe14d9dd43a06c4e/numpy/core/src/multiarray/multiarraymodule.c#L2910
+
+https://github.com/numpy/numpy/blob/68fd82271b9ea5a9e50d4e761061dfcca851382a/numpy/core/src/multiarray/datetime.c
+
+Copyright (c) 2005-2017, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the Boost project
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the FlatBuffers project
+
+Copyright 2014 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the tslib project
+
+Copyright 2015 Microsoft Corporation. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the jemalloc project
+
+https://github.com/jemalloc/jemalloc
+
+Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>.
+All rights reserved.
+Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
+Copyright (C) 2009-2017 Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice(s),
+ this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice(s),
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--------------------------------------------------------------------------------
+
+This project includes code from the Go project, BSD 3-clause license + PATENTS
+weak patent termination clause
+(https://github.com/golang/go/blob/master/PATENTS).
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the hs2client
+
+https://github.com/cloudera/hs2client
+
+Copyright 2016 Cloudera Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+The script ci/scripts/util_wait_for_it.sh has the following license
+
+Copyright (c) 2016 Giles Hall
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+The script r/configure has the following license (MIT)
+
+Copyright (c) 2017, Jeroen Ooms and Jim Hester
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+cpp/src/arrow/util/logging.cc, cpp/src/arrow/util/logging.h and
+cpp/src/arrow/util/logging-test.cc are adapted from
+Ray Project (https://github.com/ray-project/ray) (Apache 2.0).
+
+Copyright (c) 2016 Ray Project (https://github.com/ray-project/ray)
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+The files cpp/src/arrow/vendored/datetime/date.h, cpp/src/arrow/vendored/datetime/tz.h,
+cpp/src/arrow/vendored/datetime/tz_private.h, cpp/src/arrow/vendored/datetime/ios.h,
+cpp/src/arrow/vendored/datetime/ios.mm,
+cpp/src/arrow/vendored/datetime/tz.cpp are adapted from
+Howard Hinnant's date library (https://github.com/HowardHinnant/date)
+It is licensed under MIT license.
+
+The MIT License (MIT)
+Copyright (c) 2015, 2016, 2017 Howard Hinnant
+Copyright (c) 2016 Adrian Colomitchi
+Copyright (c) 2017 Florian Dang
+Copyright (c) 2017 Paul Thompson
+Copyright (c) 2018 Tomasz Kamiński
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+The file cpp/src/arrow/util/utf8.h includes code adapted from the page
+ https://bjoern.hoehrmann.de/utf-8/decoder/dfa/
+with the following license (MIT)
+
+Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+The file cpp/src/arrow/vendored/string_view.hpp has the following license
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+The files in cpp/src/arrow/vendored/xxhash/ have the following license
+(BSD 2-Clause License)
+
+xxHash Library
+Copyright (c) 2012-2014, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+You can contact the author at :
+- xxHash homepage: http://www.xxhash.com
+- xxHash source repository : https://github.com/Cyan4973/xxHash
+
+--------------------------------------------------------------------------------
+
+The files in cpp/src/arrow/vendored/double-conversion/ have the following license
+(BSD 3-Clause License)
+
+Copyright 2006-2011, the V8 project authors. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+The files in cpp/src/arrow/vendored/uriparser/ have the following license
+(BSD 3-Clause License)
+
+uriparser - RFC 3986 URI parsing library
+
+Copyright (C) 2007, Weijia Song <songweijia@gmail.com>
+Copyright (C) 2007, Sebastian Pipping <sebastian@pipping.org>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+ * Redistributions of source code must retain the above
+ copyright notice, this list of conditions and the following
+ disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+
+ * Neither the name of the <ORGANIZATION> nor the names of its
+ contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+The files under dev/tasks/conda-recipes have the following license
+
+BSD 3-clause license
+Copyright (c) 2015-2018, conda-forge
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+The files in cpp/src/arrow/vendored/utf8cpp/ have the following license
+
+Copyright 2006 Nemanja Trifunovic
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from Apache Kudu.
+
+ * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake
+
+Copyright: 2016 The Apache Software Foundation.
+Home page: https://kudu.apache.org/
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from Apache Impala (incubating), formerly
+Impala. The Impala code and rights were donated to the ASF as part of the
+Incubator process after the initial code imports into Apache Parquet.
+
+Copyright: 2012 Cloudera, Inc.
+Copyright: 2016 The Apache Software Foundation.
+Home page: http://impala.apache.org/
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from Apache Aurora.
+
+* dev/release/{release,changelog,release-candidate} are based on the scripts from
+ Apache Aurora
+
+Copyright: 2016 The Apache Software Foundation.
+Home page: https://aurora.apache.org/
+License: http://www.apache.org/licenses/LICENSE-2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from the Google styleguide.
+
+* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide.
+
+Copyright: 2009 Google Inc. All rights reserved.
+Homepage: https://github.com/google/styleguide
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+This project includes code from Snappy.
+
+* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code
+ from Google's Snappy project.
+
+Copyright: 2009 Google Inc. All rights reserved.
+Homepage: https://github.com/google/snappy
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+This project includes code from the manylinux project.
+
+* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py,
+ requirements.txt} are based on code from the manylinux project.
+
+Copyright: 2016 manylinux
+Homepage: https://github.com/pypa/manylinux
+License: The MIT License (MIT)
+
+--------------------------------------------------------------------------------
+
+This project includes code from the cymove project:
+
+* python/pyarrow/includes/common.pxd includes code from the cymove project
+
+The MIT License (MIT)
+Copyright (c) 2019 Omer Ozarslan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+OR OTHER DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from the Ursabot project under the dev/archery
+directory.
+
+License: BSD 2-Clause
+
+Copyright 2019 RStudio, Inc.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+This project includes code from CMake.
+
+* cpp/cmake_modules/FindGTest.cmake is based on code from CMake.
+
+Copyright: 2000-2019 Kitware, Inc. and Contributors
+Homepage: https://gitlab.kitware.com/cmake/cmake
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+This project includes code from mingw-w64.
+
+* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5
+
+Copyright (c) 2009 - 2013 by the mingw-w64 project
+Homepage: https://mingw-w64.org
+License: Zope Public License (ZPL) Version 2.1.
+
+--------------------------------------------------------------------------------
+
+This project includes code from Google's Asylo project.
+
+* cpp/src/arrow/result.h is based on status_or.h
+
+Copyright (c) 2017 Asylo authors
+Homepage: https://asylo.dev/
+License: Apache 2.0
+
+--------------------------------------------------------------------------------
+
+This project includes code from Google's protobuf project
+
+* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN
+
+Copyright 2008 Google Inc. All rights reserved.
+Homepage: https://developers.google.com/protocol-buffers/
+License:
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Code generated by the Protocol Buffer compiler is owned by the owner
+of the input file used when generating it. This code is not
+standalone and requires a support library to be linked with it. This
+support library is itself covered by the above license.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency LLVM is statically linked in certain binary distributions.
+Additionally some sections of source code have been derived from sources in LLVM
+and have been clearly labeled as such. LLVM has the following license:
+
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+Copyrights and Licenses for Third Party Software Distributed with LLVM:
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program Directory
+------- ---------
+Google Test llvm/utils/unittest/googletest
+OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex}
+pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT}
+ARM contributions llvm/lib/Target/ARM/LICENSE.TXT
+md5 contributions llvm/lib/Support/MD5.cpp llvm/include/llvm/Support/MD5.h
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency gRPC is statically linked in certain binary
+distributions, like the python wheels. gRPC has the following license:
+
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency Apache Thrift is statically linked in certain binary
+distributions, like the python wheels. Apache Thrift has the following license:
+
+Apache Thrift
+Copyright (C) 2006 - 2019, The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency Apache ORC is statically linked in certain binary
+distributions, like the python wheels. Apache ORC has the following license:
+
+Apache ORC
+Copyright 2013-2019 The Apache Software Foundation
+
+This product includes software developed by The Apache Software
+Foundation (http://www.apache.org/).
+
+This product includes software developed by Hewlett-Packard:
+(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency zstd is statically linked in certain binary
+distributions, like the python wheels. ZSTD has the following license:
+
+BSD License
+
+For Zstandard software
+
+Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ * Neither the name Facebook nor the names of its contributors may be used to
+ endorse or promote products derived from this software without specific
+ prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency lz4 is statically linked in certain binary
+distributions, like the python wheels. lz4 has the following license:
+
+LZ4 Library
+Copyright (c) 2011-2016, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency Brotli is statically linked in certain binary
+distributions, like the python wheels. Brotli has the following license:
+
+Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency snappy is statically linked in certain binary
+distributions, like the python wheels. snappy has the following license:
+
+Copyright 2011, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Google Inc. nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+===
+
+Some of the benchmark data in testdata/ is licensed differently:
+
+ - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and
+ is licensed under the Creative Commons Attribution 3.0 license
+ (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/
+ for more information.
+
+ - kppkn.gtb is taken from the Gaviota chess tablebase set, and
+ is licensed under the MIT License. See
+ https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1
+ for more information.
+
+ - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper
+ “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA
+ Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro,
+ which is licensed under the CC-BY license. See
+  http://www.ploscompbiol.org/static/license for more information.
+
+ - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project
+ Gutenberg. The first three have expired copyrights and are in the public
+ domain; the latter does not have expired copyright, but is still in the
+ public domain according to the license information
+ (http://www.gutenberg.org/ebooks/53).
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency gflags is statically linked in certain binary
+distributions, like the python wheels. gflags has the following license:
+
+Copyright (c) 2006, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency glog is statically linked in certain binary
+distributions, like the python wheels. glog has the following license:
+
+Copyright (c) 2008, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+A function gettimeofday in utilities.cc is based on
+
+http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd
+
+The license of this code is:
+
+Copyright (c) 2003-2008, Jouni Malinen <j@w1.fi> and contributors
+All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name(s) of the above-listed copyright holder(s) nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency re2 is statically linked in certain binary
+distributions, like the python wheels. re2 has the following license:
+
+Copyright (c) 2009 The RE2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency c-ares is statically linked in certain binary
+distributions, like the python wheels. c-ares has the following license:
+
+# c-ares license
+
+Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS
+file.
+
+Copyright 1998 by the Massachusetts Institute of Technology.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted, provided that
+the above copyright notice appear in all copies and that both that copyright
+notice and this permission notice appear in supporting documentation, and that
+the name of M.I.T. not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior permission.
+M.I.T. makes no representations about the suitability of this software for any
+purpose. It is provided "as is" without express or implied warranty.
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency zlib is redistributed as a dynamically linked shared
+library in certain binary distributions, like the python wheels. In the future
+this will likely change to static linkage. zlib has the following license:
+
+zlib.h -- interface of the 'zlib' general purpose compression library
+ version 1.2.11, January 15th, 2017
+
+ Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Jean-loup Gailly Mark Adler
+ jloup@gzip.org madler@alumni.caltech.edu
+
+--------------------------------------------------------------------------------
+
+3rdparty dependency openssl is redistributed as a dynamically linked shared
+library in certain binary distributions, like the python wheels. openssl
+preceding version 3 has the following license:
+
+ LICENSE ISSUES
+ ==============
+
+ The OpenSSL toolkit stays under a double license, i.e. both the conditions of
+ the OpenSSL License and the original SSLeay license apply to the toolkit.
+ See below for the actual license texts.
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+--------------------------------------------------------------------------------
+
+This project includes code from the rtools-backports project.
+
+* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code
+ from the rtools-backports project.
+
+Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms.
+All rights reserved.
+Homepage: https://github.com/r-windows/rtools-backports
+License: 3-clause BSD
+
+--------------------------------------------------------------------------------
+
+Some code from pandas has been adapted for the pyarrow codebase. pandas is
+available under the 3-clause BSD license, which follows:
+
+pandas license
+==============
+
+Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team
+All rights reserved.
+
+Copyright (c) 2008-2011 AQR Capital Management, LLC
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the copyright holder nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--------------------------------------------------------------------------------
+
+Some bits from DyND, in particular aspects of the build system, have been
+adapted from libdynd and dynd-python under the terms of the BSD 2-clause
+license
+
+The BSD 2-Clause License
+
+ Copyright (C) 2011-12, Dynamic NDArray Developers
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Dynamic NDArray Developers list:
+
+ * Mark Wiebe
+ * Continuum Analytics
+
+--------------------------------------------------------------------------------
+
+Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted
+for PyArrow. Ibis is released under the Apache License, Version 2.0.
+
+--------------------------------------------------------------------------------
+
+dev/tasks/homebrew-formulae/apache-arrow.rb has the following license:
+
+BSD 2-Clause License
+
+Copyright (c) 2009-present, Homebrew contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+
+cpp/src/arrow/vendored/base64.cpp has the following license
+
+ZLIB License
+
+Copyright (C) 2004-2017 René Nyffenegger
+
+This source code is provided 'as-is', without any express or implied
+warranty. In no event will the author be held liable for any damages arising
+from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose, including
+commercial applications, and to alter it and redistribute it freely, subject to
+the following restrictions:
+
+1. The origin of this source code must not be misrepresented; you must not
+ claim that you wrote the original source code. If you use this source code
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original source code.
+
+3. This notice may not be removed or altered from any source distribution.
+
+René Nyffenegger rene.nyffenegger@adp-gmbh.ch
+
+--------------------------------------------------------------------------------
+
+The file cpp/src/arrow/vendored/optional.hpp has the following license
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
+--------------------------------------------------------------------------------
+
+The file cpp/src/arrow/vendored/musl/strptime.c has the following license
+
+Copyright © 2005-2020 Rich Felker, et al.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig b/vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig
new file mode 100644
index 000000000..a7ceaf938
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+root = true
+
+[*.tmpl]
+indent_style = tab
+indent_size = 4 \ No newline at end of file
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/.gitignore b/vendor/github.com/apache/arrow/go/v14/arrow/.gitignore
new file mode 100644
index 000000000..d4b831ae8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/.gitignore
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+*.o
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+bin/
+vendor/ \ No newline at end of file
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock b/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock
new file mode 100644
index 000000000..143e4f93b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock
@@ -0,0 +1,44 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ pruneopts = ""
+ revision = "346938d642f2ec3594ed81d874461961cd0faa76"
+ version = "v1.1.0"
+
+[[projects]]
+ digest = "1:1d7e1867c49a6dd9856598ef7c3123604ea3daabf5b83f303ff457bcbc410b1d"
+ name = "github.com/pkg/errors"
+ packages = ["."]
+ pruneopts = ""
+ revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
+ version = "v0.8.1"
+
+[[projects]]
+ digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ pruneopts = ""
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:2d0dc026c4aef5e2f3a0e06a4dabe268b840d8f63190cf6894e02134a03f52c5"
+ name = "github.com/stretchr/testify"
+ packages = ["assert"]
+ pruneopts = ""
+ revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
+ version = "v1.2.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "github.com/pkg/errors",
+ "github.com/stretchr/testify/assert",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml b/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml
new file mode 100644
index 000000000..b27807d69
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "1.2.0"
+
+[[constraint]]
+ name = "github.com/pkg/errors"
+ version = "0.8.1" \ No newline at end of file
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/Makefile
new file mode 100644
index 000000000..9c4a23262
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/Makefile
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+GO_BUILD=go build
+GO_GEN=go generate
+GO_TEST?=go test
+GOPATH=$(realpath ../../../../../..)
+
+GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go')
+ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go')
+SOURCES_NO_VENDOR := $(shell find . -path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print)
+
+.PHONEY: test bench assembly generate
+
+assembly:
+ @$(MAKE) -C memory assembly
+ @$(MAKE) -C math assembly
+
+generate: bin/tmpl
+ bin/tmpl -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen_test.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl
+ bin/tmpl -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl
+ @$(MAKE) -C math generate
+
+fmt: $(SOURCES_NO_VENDOR)
+ goimports -w $^
+
+bench: $(GO_SOURCES) | assembly
+ $(GO_TEST) $(GO_TEST_ARGS) -bench=. -run=- ./...
+
+bench-noasm: $(GO_SOURCES)
+ $(GO_TEST) $(GO_TEST_ARGS) -tags='noasm' -bench=. -run=- ./...
+
+test: $(GO_SOURCES) | assembly
+ $(GO_TEST) $(GO_TEST_ARGS) ./...
+
+test-noasm: $(GO_SOURCES)
+ $(GO_TEST) $(GO_TEST_ARGS) -tags='noasm' ./...
+
+bin/tmpl: _tools/tmpl/main.go
+ $(GO_BUILD) -o $@ ./_tools/tmpl
+
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array.go b/vendor/github.com/apache/arrow/go/v14/arrow/array.go
new file mode 100644
index 000000000..7622e7503
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array.go
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "fmt"
+
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// ArrayData is the underlying memory and metadata of an Arrow array, corresponding
+// to the same-named object in the C++ implementation.
+//
+// The Array interface and subsequent typed objects provide strongly typed
+// accessors which support marshalling and other patterns to the data.
+// This interface allows direct access to the underlying raw byte buffers
+// which allows for manipulating the internal data and casting. For example,
+// one could cast the raw bytes from int64 to float64 like so:
+//
+// arrdata := GetMyInt64Data().Data()
+// newdata := array.NewData(arrow.PrimitiveTypes.Float64, arrdata.Len(),
+// arrdata.Buffers(), nil, arrdata.NullN(), arrdata.Offset())
+// defer newdata.Release()
+// float64arr := array.NewFloat64Data(newdata)
+// defer float64arr.Release()
+//
+// This is also useful in an analytics setting where memory may be reused. For
+// example, if we had a group of operations all returning float64 such as:
+//
+// Log(Sqrt(Expr(arr)))
+//
+// The low-level implementations could have signatures such as:
+//
+// func Log(values arrow.ArrayData) arrow.ArrayData
+//
+// Another example would be a function that consumes one or more memory buffers
+// in an input array and replaces them with newly-allocated data, changing the
+// output data type as well.
+type ArrayData interface {
+ // Retain increases the reference count by 1, it is safe to call
+ // in multiple goroutines simultaneously.
+ Retain()
+ // Release decreases the reference count by 1, it is safe to call
+ // in multiple goroutines simultaneously. Data is removed when reference
+ // count is 0.
+ Release()
+ // DataType returns the current datatype stored in the object.
+ DataType() DataType
+ // NullN returns the number of nulls for this data instance.
+ NullN() int
+ // Len returns the length of this data instance
+ Len() int
+ // Offset returns the offset into the raw buffers where this data begins
+ Offset() int
+ // Buffers returns the slice of raw data buffers for this data instance. Their
+ // meaning depends on the context of the data type.
+ Buffers() []*memory.Buffer
+ // Children returns the slice of children data instances, only relevant for
+ // nested data types. For instance, List data will have a single child containing
+ // elements of all the rows and Struct data will contain numfields children which
+ // are the arrays for each field of the struct.
+ Children() []ArrayData
+ // Reset allows reusing this ArrayData object by replacing the data in this ArrayData
+ // object without changing the reference count.
+ Reset(newtype DataType, newlength int, newbuffers []*memory.Buffer, newchildren []ArrayData, newnulls int, newoffset int)
+ // Dictionary returns the ArrayData object for the dictionary if this is a
+ // dictionary array, otherwise it will be nil.
+ Dictionary() ArrayData
+}
+
+// Array represents an immutable sequence of values using the Arrow in-memory format.
+type Array interface {
+ json.Marshaler
+
+ fmt.Stringer
+
+ // DataType returns the type metadata for this instance.
+ DataType() DataType
+
+ // NullN returns the number of null values in the array.
+ NullN() int
+
+ // NullBitmapBytes returns a byte slice of the validity bitmap.
+ NullBitmapBytes() []byte
+
+ // IsNull returns true if value at index is null.
+	// NOTE: IsNull will panic if NullBitmapBytes is not empty and i < 0 or i >= Len.
+ IsNull(i int) bool
+
+ // IsValid returns true if value at index is not null.
+	// NOTE: IsValid will panic if NullBitmapBytes is not empty and i < 0 or i >= Len.
+ IsValid(i int) bool
+ // ValueStr returns the value at index as a string.
+ ValueStr(i int) string
+
+ // Get single value to be marshalled with `json.Marshal`
+ GetOneForMarshal(i int) interface{}
+
+ Data() ArrayData
+
+ // Len returns the number of elements in the array.
+ Len() int
+
+ // Retain increases the reference count by 1.
+ // Retain may be called simultaneously from multiple goroutines.
+ Retain()
+
+ // Release decreases the reference count by 1.
+ // Release may be called simultaneously from multiple goroutines.
+ // When the reference count goes to zero, the memory is freed.
+ Release()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go
new file mode 100644
index 000000000..1ee04c7aa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go
@@ -0,0 +1,185 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+const (
+ // UnknownNullCount specifies the NullN should be calculated from the null bitmap buffer.
+ UnknownNullCount = -1
+
+ // NullValueStr represents a null value in arrow.Array.ValueStr and in Builder.AppendValueFromString.
+ // It should be returned from the arrow.Array.ValueStr implementations.
+ // Using it as the value in Builder.AppendValueFromString should be equivalent to Builder.AppendNull.
+ NullValueStr = "(null)"
+)
+
+type array struct {
+ refCount int64
+ data *Data
+ nullBitmapBytes []byte
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (a *array) Retain() {
+ atomic.AddInt64(&a.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// Release may be called simultaneously from multiple goroutines.
+// When the reference count goes to zero, the memory is freed.
+func (a *array) Release() {
+ debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&a.refCount, -1) == 0 {
+ a.data.Release()
+ a.data, a.nullBitmapBytes = nil, nil
+ }
+}
+
+// DataType returns the type metadata for this instance.
+func (a *array) DataType() arrow.DataType { return a.data.dtype }
+
+// NullN returns the number of null values in the array.
+func (a *array) NullN() int {
+ if a.data.nulls < 0 {
+ a.data.nulls = a.data.length - bitutil.CountSetBits(a.nullBitmapBytes, a.data.offset, a.data.length)
+ }
+ return a.data.nulls
+}
+
+// NullBitmapBytes returns a byte slice of the validity bitmap.
+func (a *array) NullBitmapBytes() []byte { return a.nullBitmapBytes }
+
+func (a *array) Data() arrow.ArrayData { return a.data }
+
+// Len returns the number of elements in the array.
+func (a *array) Len() int { return a.data.length }
+
+// IsNull returns true if value at index is null.
+// NOTE: IsNull will panic if NullBitmapBytes is not empty and i < 0 or i >= Len.
+func (a *array) IsNull(i int) bool {
+ return len(a.nullBitmapBytes) != 0 && bitutil.BitIsNotSet(a.nullBitmapBytes, a.data.offset+i)
+}
+
+// IsValid returns true if value at index is not null.
+// NOTE: IsValid will panic if NullBitmapBytes is not empty and i < 0 or i >= Len.
+func (a *array) IsValid(i int) bool {
+ return len(a.nullBitmapBytes) == 0 || bitutil.BitIsSet(a.nullBitmapBytes, a.data.offset+i)
+}
+
+func (a *array) setData(data *Data) {
+ // Retain before releasing in case a.data is the same as data.
+ data.Retain()
+
+ if a.data != nil {
+ a.data.Release()
+ }
+
+ if len(data.buffers) > 0 && data.buffers[0] != nil {
+ a.nullBitmapBytes = data.buffers[0].Bytes()
+ }
+ a.data = data
+}
+
+func (a *array) Offset() int {
+ return a.data.Offset()
+}
+
+type arrayConstructorFn func(arrow.ArrayData) arrow.Array
+
+var (
+ makeArrayFn [64]arrayConstructorFn
+)
+
+func invalidDataType(data arrow.ArrayData) arrow.Array {
+ panic("invalid data type: " + data.DataType().ID().String())
+}
+
+// MakeFromData constructs a strongly-typed array instance from generic Data.
+func MakeFromData(data arrow.ArrayData) arrow.Array {
+ return makeArrayFn[byte(data.DataType().ID()&0x3f)](data)
+}
+
+// NewSlice constructs a zero-copy slice of the array with the indicated
+// indices i and j, corresponding to array[i:j].
+// The returned array must be Release()'d after use.
+//
+// NewSlice panics if the slice is outside the valid range of the input array.
+// NewSlice panics if j < i.
+func NewSlice(arr arrow.Array, i, j int64) arrow.Array {
+ data := NewSliceData(arr.Data(), i, j)
+ slice := MakeFromData(data)
+ data.Release()
+ return slice
+}
+
+func init() {
+ makeArrayFn = [...]arrayConstructorFn{
+ arrow.NULL: func(data arrow.ArrayData) arrow.Array { return NewNullData(data) },
+ arrow.BOOL: func(data arrow.ArrayData) arrow.Array { return NewBooleanData(data) },
+ arrow.UINT8: func(data arrow.ArrayData) arrow.Array { return NewUint8Data(data) },
+ arrow.INT8: func(data arrow.ArrayData) arrow.Array { return NewInt8Data(data) },
+ arrow.UINT16: func(data arrow.ArrayData) arrow.Array { return NewUint16Data(data) },
+ arrow.INT16: func(data arrow.ArrayData) arrow.Array { return NewInt16Data(data) },
+ arrow.UINT32: func(data arrow.ArrayData) arrow.Array { return NewUint32Data(data) },
+ arrow.INT32: func(data arrow.ArrayData) arrow.Array { return NewInt32Data(data) },
+ arrow.UINT64: func(data arrow.ArrayData) arrow.Array { return NewUint64Data(data) },
+ arrow.INT64: func(data arrow.ArrayData) arrow.Array { return NewInt64Data(data) },
+ arrow.FLOAT16: func(data arrow.ArrayData) arrow.Array { return NewFloat16Data(data) },
+ arrow.FLOAT32: func(data arrow.ArrayData) arrow.Array { return NewFloat32Data(data) },
+ arrow.FLOAT64: func(data arrow.ArrayData) arrow.Array { return NewFloat64Data(data) },
+ arrow.STRING: func(data arrow.ArrayData) arrow.Array { return NewStringData(data) },
+ arrow.BINARY: func(data arrow.ArrayData) arrow.Array { return NewBinaryData(data) },
+ arrow.FIXED_SIZE_BINARY: func(data arrow.ArrayData) arrow.Array { return NewFixedSizeBinaryData(data) },
+ arrow.DATE32: func(data arrow.ArrayData) arrow.Array { return NewDate32Data(data) },
+ arrow.DATE64: func(data arrow.ArrayData) arrow.Array { return NewDate64Data(data) },
+ arrow.TIMESTAMP: func(data arrow.ArrayData) arrow.Array { return NewTimestampData(data) },
+ arrow.TIME32: func(data arrow.ArrayData) arrow.Array { return NewTime32Data(data) },
+ arrow.TIME64: func(data arrow.ArrayData) arrow.Array { return NewTime64Data(data) },
+ arrow.INTERVAL_MONTHS: func(data arrow.ArrayData) arrow.Array { return NewMonthIntervalData(data) },
+ arrow.INTERVAL_DAY_TIME: func(data arrow.ArrayData) arrow.Array { return NewDayTimeIntervalData(data) },
+ arrow.DECIMAL128: func(data arrow.ArrayData) arrow.Array { return NewDecimal128Data(data) },
+ arrow.DECIMAL256: func(data arrow.ArrayData) arrow.Array { return NewDecimal256Data(data) },
+ arrow.LIST: func(data arrow.ArrayData) arrow.Array { return NewListData(data) },
+ arrow.STRUCT: func(data arrow.ArrayData) arrow.Array { return NewStructData(data) },
+ arrow.SPARSE_UNION: func(data arrow.ArrayData) arrow.Array { return NewSparseUnionData(data) },
+ arrow.DENSE_UNION: func(data arrow.ArrayData) arrow.Array { return NewDenseUnionData(data) },
+ arrow.DICTIONARY: func(data arrow.ArrayData) arrow.Array { return NewDictionaryData(data) },
+ arrow.MAP: func(data arrow.ArrayData) arrow.Array { return NewMapData(data) },
+ arrow.EXTENSION: func(data arrow.ArrayData) arrow.Array { return NewExtensionData(data) },
+ arrow.FIXED_SIZE_LIST: func(data arrow.ArrayData) arrow.Array { return NewFixedSizeListData(data) },
+ arrow.DURATION: func(data arrow.ArrayData) arrow.Array { return NewDurationData(data) },
+ arrow.LARGE_STRING: func(data arrow.ArrayData) arrow.Array { return NewLargeStringData(data) },
+ arrow.LARGE_BINARY: func(data arrow.ArrayData) arrow.Array { return NewLargeBinaryData(data) },
+ arrow.LARGE_LIST: func(data arrow.ArrayData) arrow.Array { return NewLargeListData(data) },
+ arrow.INTERVAL_MONTH_DAY_NANO: func(data arrow.ArrayData) arrow.Array { return NewMonthDayNanoIntervalData(data) },
+ arrow.RUN_END_ENCODED: func(data arrow.ArrayData) arrow.Array { return NewRunEndEncodedData(data) },
+ arrow.LIST_VIEW: func(data arrow.ArrayData) arrow.Array { return NewListViewData(data) },
+ arrow.LARGE_LIST_VIEW: func(data arrow.ArrayData) arrow.Array { return NewLargeListViewData(data) },
+
+ // invalid data types to fill out array to size 2^6 - 1
+ 63: invalidDataType,
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go
new file mode 100644
index 000000000..e9e6e66e7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go
@@ -0,0 +1,323 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "strings"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+type BinaryLike interface {
+ arrow.Array
+ ValueBytes() []byte
+ ValueOffset64(int) int64
+}
+
+// A type which represents an immutable sequence of variable-length binary strings.
+type Binary struct {
+ array
+ valueOffsets []int32
+ valueBytes []byte
+}
+
+// NewBinaryData constructs a new Binary array from data.
+func NewBinaryData(data arrow.ArrayData) *Binary {
+ a := &Binary{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Value returns the slice at index i. This value should not be mutated.
+func (a *Binary) Value(i int) []byte {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ idx := a.array.data.offset + i
+ return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]]
+}
+
+// ValueStr returns a copy of the base64-encoded string value or NullValueStr
+func (a *Binary) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return base64.StdEncoding.EncodeToString(a.Value(i))
+}
+
+// ValueString returns the string at index i without performing additional allocations.
+// The string is only valid for the lifetime of the Binary array.
+func (a *Binary) ValueString(i int) string {
+ b := a.Value(i)
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func (a *Binary) ValueOffset(i int) int {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ return int(a.valueOffsets[a.array.data.offset+i])
+}
+
+func (a *Binary) ValueOffset64(i int) int64 {
+ return int64(a.ValueOffset(i))
+}
+
+func (a *Binary) ValueLen(i int) int {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ beg := a.array.data.offset + i
+ return int(a.valueOffsets[beg+1] - a.valueOffsets[beg])
+}
+
+func (a *Binary) ValueOffsets() []int32 {
+ beg := a.array.data.offset
+ end := beg + a.array.data.length + 1
+ return a.valueOffsets[beg:end]
+}
+
+func (a *Binary) ValueBytes() []byte {
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ return a.valueBytes[a.valueOffsets[beg]:a.valueOffsets[end]]
+}
+
+func (a *Binary) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%q", a.ValueString(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+func (a *Binary) setData(data *Data) {
+ if len(data.buffers) != 3 {
+ panic("len(data.buffers) != 3")
+ }
+
+ a.array.setData(data)
+
+ if valueData := data.buffers[2]; valueData != nil {
+ a.valueBytes = valueData.Bytes()
+ }
+
+ if valueOffsets := data.buffers[1]; valueOffsets != nil {
+ a.valueOffsets = arrow.Int32Traits.CastFromBytes(valueOffsets.Bytes())
+ }
+
+ if a.array.data.length < 1 {
+ return
+ }
+
+ expNumOffsets := a.array.data.offset + a.array.data.length + 1
+ if len(a.valueOffsets) < expNumOffsets {
+ panic(fmt.Errorf("arrow/array: binary offset buffer must have at least %d values", expNumOffsets))
+ }
+
+ if int(a.valueOffsets[expNumOffsets-1]) > len(a.valueBytes) {
+ panic("arrow/array: binary offsets out of bounds of data buffer")
+ }
+}
+
+func (a *Binary) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.Value(i)
+}
+
+func (a *Binary) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+ // golang marshal standard says that []byte will be marshalled
+ // as a base64-encoded string
+ return json.Marshal(vals)
+}
+
+func arrayEqualBinary(left, right *Binary) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if !bytes.Equal(left.Value(i), right.Value(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+type LargeBinary struct {
+ array
+ valueOffsets []int64
+ valueBytes []byte
+}
+
+func NewLargeBinaryData(data arrow.ArrayData) *LargeBinary {
+ a := &LargeBinary{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+func (a *LargeBinary) Value(i int) []byte {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ idx := a.array.data.offset + i
+ return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]]
+}
+
+func (a *LargeBinary) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return base64.StdEncoding.EncodeToString(a.Value(i))
+}
+func (a *LargeBinary) ValueString(i int) string {
+ b := a.Value(i)
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+func (a *LargeBinary) ValueOffset(i int) int64 {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ return a.valueOffsets[a.array.data.offset+i]
+}
+
+func (a *LargeBinary) ValueOffset64(i int) int64 {
+ return a.ValueOffset(i)
+}
+
+func (a *LargeBinary) ValueLen(i int) int {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ beg := a.array.data.offset + i
+ return int(a.valueOffsets[beg+1] - a.valueOffsets[beg])
+}
+
+func (a *LargeBinary) ValueOffsets() []int64 {
+ beg := a.array.data.offset
+ end := beg + a.array.data.length + 1
+ return a.valueOffsets[beg:end]
+}
+
+func (a *LargeBinary) ValueBytes() []byte {
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ return a.valueBytes[a.valueOffsets[beg]:a.valueOffsets[end]]
+}
+
+func (a *LargeBinary) String() string {
+ var o strings.Builder
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(&o, "%q", a.ValueString(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+func (a *LargeBinary) setData(data *Data) {
+ if len(data.buffers) != 3 {
+ panic("len(data.buffers) != 3")
+ }
+
+ a.array.setData(data)
+
+ if valueData := data.buffers[2]; valueData != nil {
+ a.valueBytes = valueData.Bytes()
+ }
+
+ if valueOffsets := data.buffers[1]; valueOffsets != nil {
+ a.valueOffsets = arrow.Int64Traits.CastFromBytes(valueOffsets.Bytes())
+ }
+
+ if a.array.data.length < 1 {
+ return
+ }
+
+ expNumOffsets := a.array.data.offset + a.array.data.length + 1
+ if len(a.valueOffsets) < expNumOffsets {
+ panic(fmt.Errorf("arrow/array: large binary offset buffer must have at least %d values", expNumOffsets))
+ }
+
+ if int(a.valueOffsets[expNumOffsets-1]) > len(a.valueBytes) {
+ panic("arrow/array: large binary offsets out of bounds of data buffer")
+ }
+}
+
+func (a *LargeBinary) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.Value(i)
+}
+
+func (a *LargeBinary) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+ // golang marshal standard says that []byte will be marshalled
+ // as a base64-encoded string
+ return json.Marshal(vals)
+}
+
+func arrayEqualLargeBinary(left, right *LargeBinary) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if !bytes.Equal(left.Value(i), right.Value(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ _ arrow.Array = (*Binary)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go
new file mode 100644
index 000000000..3cb709b45
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go
@@ -0,0 +1,375 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// A BinaryBuilder is used to build a Binary array using the Append methods.
+type BinaryBuilder struct {
+ builder
+
+ dtype arrow.BinaryDataType
+ offsets bufBuilder
+ values *byteBufferBuilder
+
+ appendOffsetVal func(int)
+ getOffsetVal func(int) int
+ maxCapacity uint64
+ offsetByteWidth int
+}
+
+// NewBinaryBuilder can be used for any of the variable length binary types,
+// Binary, LargeBinary, String, LargeString by passing the appropriate data type
+func NewBinaryBuilder(mem memory.Allocator, dtype arrow.BinaryDataType) *BinaryBuilder {
+ var (
+ offsets bufBuilder
+ offsetValFn func(int)
+ maxCapacity uint64
+ offsetByteWidth int
+ getOffsetVal func(int) int
+ )
+ switch dtype.Layout().Buffers[1].ByteWidth {
+ case 4:
+ b := newInt32BufferBuilder(mem)
+ offsetValFn = func(v int) { b.AppendValue(int32(v)) }
+ getOffsetVal = func(i int) int { return int(b.Value(i)) }
+ offsets = b
+ maxCapacity = math.MaxInt32
+ offsetByteWidth = arrow.Int32SizeBytes
+ case 8:
+ b := newInt64BufferBuilder(mem)
+ offsetValFn = func(v int) { b.AppendValue(int64(v)) }
+ getOffsetVal = func(i int) int { return int(b.Value(i)) }
+ offsets = b
+ maxCapacity = math.MaxInt64
+ offsetByteWidth = arrow.Int64SizeBytes
+ }
+
+ b := &BinaryBuilder{
+ builder: builder{refCount: 1, mem: mem},
+ dtype: dtype,
+ offsets: offsets,
+ values: newByteBufferBuilder(mem),
+ appendOffsetVal: offsetValFn,
+ maxCapacity: maxCapacity,
+ offsetByteWidth: offsetByteWidth,
+ getOffsetVal: getOffsetVal,
+ }
+ return b
+}
+
+func (b *BinaryBuilder) Type() arrow.DataType { return b.dtype }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (b *BinaryBuilder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.offsets != nil {
+ b.offsets.Release()
+ b.offsets = nil
+ }
+ if b.values != nil {
+ b.values.Release()
+ b.values = nil
+ }
+ }
+}
+
+func (b *BinaryBuilder) Append(v []byte) {
+ b.Reserve(1)
+ b.appendNextOffset()
+ b.values.Append(v)
+ b.UnsafeAppendBoolToBitmap(true)
+}
+
+func (b *BinaryBuilder) AppendString(v string) {
+ b.Append([]byte(v))
+}
+
+func (b *BinaryBuilder) AppendNull() {
+ b.Reserve(1)
+ b.appendNextOffset()
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *BinaryBuilder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *BinaryBuilder) AppendEmptyValue() {
+ b.Reserve(1)
+ b.appendNextOffset()
+ b.UnsafeAppendBoolToBitmap(true)
+}
+
+func (b *BinaryBuilder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *BinaryBuilder) AppendValues(v [][]byte, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ for _, vv := range v {
+ b.appendNextOffset()
+ b.values.Append(vv)
+ }
+
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+// AppendStringValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *BinaryBuilder) AppendStringValues(v []string, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ for _, vv := range v {
+ b.appendNextOffset()
+ b.values.Append([]byte(vv))
+ }
+
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *BinaryBuilder) UnsafeAppend(v []byte) {
+ b.appendNextOffset()
+ b.values.unsafeAppend(v)
+ b.UnsafeAppendBoolToBitmap(true)
+}
+
+func (b *BinaryBuilder) Value(i int) []byte {
+ start := b.getOffsetVal(i)
+ var end int
+ if i == (b.length - 1) {
+ end = b.values.Len()
+ } else {
+ end = b.getOffsetVal(i + 1)
+ }
+ return b.values.Bytes()[start:end]
+}
+
+func (b *BinaryBuilder) init(capacity int) {
+ b.builder.init(capacity)
+ b.offsets.resize((capacity + 1) * b.offsetByteWidth)
+}
+
+// DataLen returns the number of bytes in the data array.
+func (b *BinaryBuilder) DataLen() int { return b.values.length }
+
+// DataCap returns the total number of bytes that can be stored
+// without allocating additional memory.
+func (b *BinaryBuilder) DataCap() int { return b.values.capacity }
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *BinaryBuilder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// ReserveData ensures there is enough space for appending n bytes
+// by checking the capacity and resizing the data buffer if necessary.
+func (b *BinaryBuilder) ReserveData(n int) {
+ if b.values.capacity < b.values.length+n {
+ b.values.resize(b.values.Len() + n)
+ }
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *BinaryBuilder) Resize(n int) {
+ b.offsets.resize((n + 1) * b.offsetByteWidth)
+ if (n * b.offsetByteWidth) < b.offsets.Len() {
+ b.offsets.SetLength(n * b.offsetByteWidth)
+ }
+ b.builder.resize(n, b.init)
+}
+
+func (b *BinaryBuilder) ResizeData(n int) {
+ b.values.length = n
+}
+
+// NewArray creates a Binary array from the memory buffers used by the builder and resets the BinaryBuilder
+// so it can be used to build a new array.
+//
+// Builds the appropriate Binary or LargeBinary array based on the datatype
+// it was initialized with.
+func (b *BinaryBuilder) NewArray() arrow.Array {
+ if b.offsetByteWidth == arrow.Int32SizeBytes {
+ return b.NewBinaryArray()
+ }
+ return b.NewLargeBinaryArray()
+}
+
+// NewBinaryArray creates a Binary array from the memory buffers used by the builder and resets the BinaryBuilder
+// so it can be used to build a new array.
+func (b *BinaryBuilder) NewBinaryArray() (a *Binary) {
+ if b.offsetByteWidth != arrow.Int32SizeBytes {
+ panic("arrow/array: invalid call to NewBinaryArray when building a LargeBinary array")
+ }
+
+ data := b.newData()
+ a = NewBinaryData(data)
+ data.Release()
+ return
+}
+
+func (b *BinaryBuilder) NewLargeBinaryArray() (a *LargeBinary) {
+ if b.offsetByteWidth != arrow.Int64SizeBytes {
+ panic("arrow/array: invalid call to NewLargeBinaryArray when building a Binary array")
+ }
+
+ data := b.newData()
+ a = NewLargeBinaryData(data)
+ data.Release()
+ return
+}
+
+func (b *BinaryBuilder) newData() (data *Data) {
+ b.appendNextOffset()
+ offsets, values := b.offsets.Finish(), b.values.Finish()
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, offsets, values}, nil, b.nulls, 0)
+ if offsets != nil {
+ offsets.Release()
+ }
+
+ if values != nil {
+ values.Release()
+ }
+
+ b.builder.reset()
+
+ return
+}
+
+func (b *BinaryBuilder) appendNextOffset() {
+ numBytes := b.values.Len()
+ debug.Assert(uint64(numBytes) <= b.maxCapacity, "exceeded maximum capacity of binary array")
+ b.appendOffsetVal(numBytes)
+}
+
+func (b *BinaryBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+
+ if b.dtype.IsUtf8() {
+ b.Append([]byte(s))
+ return nil
+ }
+
+ decodedVal, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return fmt.Errorf("could not decode base64 string: %w", err)
+ }
+ b.Append(decodedVal)
+ return nil
+}
+
+func (b *BinaryBuilder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case string:
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err
+ }
+ b.Append(data)
+ case []byte:
+ b.Append(v)
+ case nil:
+ b.AppendNull()
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf([]byte{}),
+ Offset: dec.InputOffset(),
+ }
+ }
+ return nil
+}
+
+func (b *BinaryBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *BinaryBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+var (
+ _ Builder = (*BinaryBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go
new file mode 100644
index 000000000..464cef48b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go
@@ -0,0 +1,126 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Boolean represents an immutable sequence of boolean values, stored
+// as a bit-packed byte buffer.
+type Boolean struct {
+ array
+ values []byte
+}
+
+// NewBoolean creates a boolean array from the data memory.Buffer, containing length elements.
+// The nullBitmap buffer can be nil if there are no null values.
+// If nulls is not known, use UnknownNullCount to calculate the value of NullN at runtime from the nullBitmap buffer.
+func NewBoolean(length int, data *memory.Buffer, nullBitmap *memory.Buffer, nulls int) *Boolean {
+ arrdata := NewData(arrow.FixedWidthTypes.Boolean, length, []*memory.Buffer{nullBitmap, data}, nil, nulls, 0)
+ defer arrdata.Release()
+ return NewBooleanData(arrdata)
+}
+
+// NewBooleanData constructs a Boolean array from existing ArrayData,
+// retaining a new reference to it.
+func NewBooleanData(data arrow.ArrayData) *Boolean {
+ a := &Boolean{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Value returns the boolean value at index i.
+// It panics if i is out of range.
+func (a *Boolean) Value(i int) bool {
+ if i < 0 || i >= a.array.data.length {
+ panic("arrow/array: index out of range")
+ }
+ return bitutil.BitIsSet(a.values, a.array.data.offset+i)
+}
+
+// ValueStr returns the string representation of the value at index i,
+// or NullValueStr if the value is null.
+func (a *Boolean) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ } else {
+ return strconv.FormatBool(a.Value(i))
+ }
+}
+
+// String returns a human-readable representation of the array,
+// e.g. "[true false (null)]".
+func (a *Boolean) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", a.Value(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData updates the array's backing ArrayData and caches the raw
+// value buffer (buffer 1) for fast bit access.
+func (a *Boolean) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = vals.Bytes()
+ }
+}
+
+// GetOneForMarshal returns the value at index i suitable for JSON
+// marshalling: the bool value when valid, nil when null.
+func (a *Boolean) GetOneForMarshal(i int) interface{} {
+ if a.IsValid(i) {
+ return a.Value(i)
+ }
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler, encoding the array as a JSON
+// array of bools with nulls encoded as JSON null.
+func (a *Boolean) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.Value(i)
+ } else {
+ vals[i] = nil
+ }
+ }
+ return json.Marshal(vals)
+}
+
+// arrayEqualBoolean reports whether the non-null values of the two arrays
+// are equal. NOTE(review): only left's validity is consulted here, so this
+// presumably assumes the caller has already verified equal lengths and
+// identical null bitmaps — confirm against the generic compare logic.
+func arrayEqualBoolean(left, right *Boolean) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// compile-time interface conformance check
+var (
+ _ arrow.Array = (*Boolean)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go
new file mode 100644
index 000000000..10b7405aa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go
@@ -0,0 +1,263 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// BooleanBuilder incrementally builds a Boolean array from appended values.
+type BooleanBuilder struct {
+ builder
+
+ data *memory.Buffer
+ rawData []byte
+}
+
+// NewBooleanBuilder returns a new BooleanBuilder allocating from mem,
+// with an initial reference count of 1.
+func NewBooleanBuilder(mem memory.Allocator) *BooleanBuilder {
+ return &BooleanBuilder{builder: builder{refCount: 1, mem: mem}}
+}
+
+// Type returns the datatype being built, always arrow.FixedWidthTypes.Boolean.
+func (b *BooleanBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.Boolean }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (b *BooleanBuilder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+// Append appends the boolean value v to the array being built.
+func (b *BooleanBuilder) Append(v bool) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+// AppendByte appends v interpreted as a boolean (non-zero is true).
+func (b *BooleanBuilder) AppendByte(v byte) {
+ b.Reserve(1)
+ b.UnsafeAppend(v != 0)
+}
+
+// AppendNull appends a null value to the array being built.
+func (b *BooleanBuilder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+// AppendNulls appends n null values to the array being built.
+func (b *BooleanBuilder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+// AppendEmptyValue appends the zero value (false) as a valid element.
+func (b *BooleanBuilder) AppendEmptyValue() {
+ b.Reserve(1)
+ b.UnsafeAppend(false)
+}
+
+// AppendEmptyValues appends n zero values (false) as valid elements.
+func (b *BooleanBuilder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+// AppendValueFromString appends a value parsed from its string
+// representation via strconv.ParseBool; NullValueStr appends a null.
+func (b *BooleanBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ val, err := strconv.ParseBool(s)
+ if err != nil {
+ return err
+ }
+ b.Append(val)
+ return nil
+}
+
+// UnsafeAppend appends v without checking capacity; callers must have
+// called Reserve first. It marks the element valid in the null bitmap
+// and sets or clears the corresponding data bit.
+func (b *BooleanBuilder) UnsafeAppend(v bool) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ if v {
+ bitutil.SetBit(b.rawData, b.length)
+ } else {
+ bitutil.ClearBit(b.rawData, b.length)
+ }
+ b.length++
+}
+
+// AppendValues appends the values in v, using valid as the validity bitmap:
+// valid must be empty (all elements valid) or the same length as v.
+func (b *BooleanBuilder) AppendValues(v []bool, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ for i, vv := range v {
+ bitutil.SetBitTo(b.rawData, b.length+i, vv)
+ }
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+// init allocates the data buffer for capacity elements (bit-packed) in
+// addition to the base builder's null bitmap.
+func (b *BooleanBuilder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.BooleanTraits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = b.data.Bytes()
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *BooleanBuilder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *BooleanBuilder) Resize(n int) {
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(n, b.init)
+ b.data.Resize(arrow.BooleanTraits.BytesRequired(n))
+ b.rawData = b.data.Bytes()
+ }
+}
+
+// NewArray creates a Boolean array from the memory buffers used by the builder and resets the BooleanBuilder
+// so it can be used to build a new array.
+func (b *BooleanBuilder) NewArray() arrow.Array {
+ return b.NewBooleanArray()
+}
+
+// NewBooleanArray creates a Boolean array from the memory buffers used by the builder and resets the BooleanBuilder
+// so it can be used to build a new array.
+func (b *BooleanBuilder) NewBooleanArray() (a *Boolean) {
+ data := b.newData()
+ a = NewBooleanData(data)
+ data.Release()
+ return
+}
+
+// newData trims the data buffer to the built length, packages the null
+// bitmap and data buffers into a *Data, and resets the builder state so
+// it can be reused.
+func (b *BooleanBuilder) newData() *Data {
+ bytesRequired := arrow.BooleanTraits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ res := NewData(arrow.FixedWidthTypes.Boolean, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ // ownership of the data now lives with res; drop the builder's reference
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return res
+}
+
+// UnmarshalOne decodes a single JSON token from dec into the builder:
+// bools are appended directly, strings and numbers are parsed with
+// strconv.ParseBool, and a JSON null appends a null. Any other token
+// type results in a *json.UnmarshalTypeError.
+func (b *BooleanBuilder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case bool:
+ b.Append(v)
+ case string:
+ val, err := strconv.ParseBool(v)
+ if err != nil {
+ return err
+ }
+ b.Append(val)
+ case json.Number:
+ val, err := strconv.ParseBool(v.String())
+ if err != nil {
+ return err
+ }
+ b.Append(val)
+ case nil:
+ b.AppendNull()
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf(true),
+ Offset: dec.InputOffset(),
+ }
+ }
+ return nil
+}
+
+// Unmarshal appends values to the builder from dec until the current
+// JSON array is exhausted, stopping at the first error.
+func (b *BooleanBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler: data must be a JSON array
+// whose elements (bools, strings, numbers or nulls) are appended to the builder.
+func (b *BooleanBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("boolean builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+// Value returns the i-th value appended so far, read directly from the
+// builder's bit-packed data buffer.
+func (b *BooleanBuilder) Value(i int) bool {
+ return bitutil.BitIsSet(b.rawData, i)
+}
+
+// compile-time interface conformance check
+var (
+ _ Builder = (*BooleanBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go
new file mode 100644
index 000000000..e023b0d90
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go
@@ -0,0 +1,153 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// bufBuilder describes the common API shared by the typed buffer
+// builders in this package.
+type bufBuilder interface {
+ Retain()
+ Release()
+ Len() int
+ Cap() int
+ Bytes() []byte
+ resize(int)
+ Advance(int)
+ SetLength(int)
+ Append([]byte)
+ Reset()
+ Finish() *memory.Buffer
+}
+
+// A bufferBuilder provides common functionality for populating memory with a sequence of type-specific values.
+// Specialized implementations provide type-safe APIs for appending and accessing the memory.
+type bufferBuilder struct {
+ refCount int64
+ mem memory.Allocator
+ buffer *memory.Buffer
+ length int
+ capacity int
+
+ bytes []byte
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (b *bufferBuilder) Retain() {
+ atomic.AddInt64(&b.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (b *bufferBuilder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.buffer != nil {
+ b.buffer.Release()
+ b.buffer, b.bytes = nil, nil
+ }
+ }
+}
+
+// Len returns the length of the memory buffer in bytes.
+func (b *bufferBuilder) Len() int { return b.length }
+
+// Cap returns the total number of bytes that can be stored without allocating additional memory.
+func (b *bufferBuilder) Cap() int { return b.capacity }
+
+// Bytes returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *bufferBuilder) Bytes() []byte { return b.bytes[:b.length] }
+
+// resize grows the underlying buffer to hold at least elements bytes,
+// allocating it on first use and zeroing any newly-acquired capacity.
+func (b *bufferBuilder) resize(elements int) {
+ if b.buffer == nil {
+ b.buffer = memory.NewResizableBuffer(b.mem)
+ }
+
+ b.buffer.ResizeNoShrink(elements)
+ oldCapacity := b.capacity
+ b.capacity = b.buffer.Cap()
+ b.bytes = b.buffer.Buf()
+
+ if b.capacity > oldCapacity {
+ memory.Set(b.bytes[oldCapacity:], 0)
+ }
+}
+
+// SetLength sets the buffer length in bytes, growing via Advance when
+// length exceeds the current length.
+func (b *bufferBuilder) SetLength(length int) {
+ if length > b.length {
+ b.Advance(length)
+ return
+ }
+
+ b.length = length
+}
+
+// Advance increases the buffer by length and initializes the skipped bytes to zero.
+func (b *bufferBuilder) Advance(length int) {
+ if b.capacity < b.length+length {
+ newCapacity := bitutil.NextPowerOf2(b.length + length)
+ b.resize(newCapacity)
+ }
+ b.length += length
+}
+
+// Append appends the contents of v to the buffer, resizing it if necessary.
+func (b *bufferBuilder) Append(v []byte) {
+ if b.capacity < b.length+len(v) {
+ newCapacity := bitutil.NextPowerOf2(b.length + len(v))
+ b.resize(newCapacity)
+ }
+ b.unsafeAppend(v)
+}
+
+// Reset returns the buffer to an empty state. Reset releases the memory and sets the length and capacity to zero.
+func (b *bufferBuilder) Reset() {
+ if b.buffer != nil {
+ b.buffer.Release()
+ }
+ b.buffer, b.bytes = nil, nil
+ b.capacity, b.length = 0, 0
+}
+
+// Finish trims the buffer to the built length and transfers ownership of
+// it to the caller, resetting the builder. The returned buffer is never
+// nil: when nothing was built, an empty buffer is returned.
+func (b *bufferBuilder) Finish() (buffer *memory.Buffer) {
+ if b.length > 0 {
+ b.buffer.ResizeNoShrink(b.length)
+ }
+ buffer = b.buffer
+ b.buffer = nil
+ b.Reset()
+ if buffer == nil {
+ buffer = memory.NewBufferBytes(nil)
+ }
+ return
+}
+
+// unsafeAppend copies data into the buffer without a capacity check;
+// callers must have ensured sufficient capacity first.
+func (b *bufferBuilder) unsafeAppend(data []byte) {
+ copy(b.bytes[b.length:], data)
+ b.length += len(data)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go
new file mode 100644
index 000000000..00a0d1c21
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import "github.com/apache/arrow/go/v14/arrow/memory"
+
+// byteBufferBuilder wraps bufferBuilder to provide byte-typed access
+// to the underlying buffer.
+type byteBufferBuilder struct {
+ bufferBuilder
+}
+
+// newByteBufferBuilder returns a byteBufferBuilder allocating from mem,
+// with an initial reference count of 1.
+func newByteBufferBuilder(mem memory.Allocator) *byteBufferBuilder {
+ return &byteBufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// Values returns the bytes appended so far; Value returns the byte at index i.
+func (b *byteBufferBuilder) Values() []byte { return b.Bytes() }
+func (b *byteBufferBuilder) Value(i int) byte { return b.bytes[i] }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go
new file mode 100644
index 000000000..879bc9f57
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go
@@ -0,0 +1,124 @@
+// Code generated by array/bufferbuilder_numeric.gen.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// int64BufferBuilder wraps bufferBuilder to provide int64-typed access
+// to the underlying byte buffer.
+type int64BufferBuilder struct {
+ bufferBuilder
+}
+
+// newInt64BufferBuilder returns an int64BufferBuilder allocating from mem,
+// with an initial reference count of 1.
+func newInt64BufferBuilder(mem memory.Allocator) *int64BufferBuilder {
+ return &int64BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *int64BufferBuilder) AppendValues(v []int64) { b.Append(arrow.Int64Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *int64BufferBuilder) Values() []int64 { return arrow.Int64Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the int64 element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *int64BufferBuilder) Value(i int) int64 { return b.Values()[i] }
+
+// Len returns the number of int64 elements in the buffer.
+func (b *int64BufferBuilder) Len() int { return b.length / arrow.Int64SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *int64BufferBuilder) AppendValue(v int64) {
+ if b.capacity < b.length+arrow.Int64SizeBytes {
+ newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int64SizeBytes)
+ b.resize(newCapacity)
+ }
+ arrow.Int64Traits.PutValue(b.bytes[b.length:], v)
+ b.length += arrow.Int64SizeBytes
+}
+
+// int32BufferBuilder wraps bufferBuilder to provide int32-typed access
+// to the underlying byte buffer.
+type int32BufferBuilder struct {
+ bufferBuilder
+}
+
+// newInt32BufferBuilder returns an int32BufferBuilder allocating from mem,
+// with an initial reference count of 1.
+func newInt32BufferBuilder(mem memory.Allocator) *int32BufferBuilder {
+ return &int32BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *int32BufferBuilder) AppendValues(v []int32) { b.Append(arrow.Int32Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *int32BufferBuilder) Values() []int32 { return arrow.Int32Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the int32 element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *int32BufferBuilder) Value(i int) int32 { return b.Values()[i] }
+
+// Len returns the number of int32 elements in the buffer.
+func (b *int32BufferBuilder) Len() int { return b.length / arrow.Int32SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *int32BufferBuilder) AppendValue(v int32) {
+ if b.capacity < b.length+arrow.Int32SizeBytes {
+ newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int32SizeBytes)
+ b.resize(newCapacity)
+ }
+ arrow.Int32Traits.PutValue(b.bytes[b.length:], v)
+ b.length += arrow.Int32SizeBytes
+}
+
+// int8BufferBuilder wraps bufferBuilder to provide int8-typed access
+// to the underlying byte buffer.
+type int8BufferBuilder struct {
+ bufferBuilder
+}
+
+// newInt8BufferBuilder returns an int8BufferBuilder allocating from mem,
+// with an initial reference count of 1.
+func newInt8BufferBuilder(mem memory.Allocator) *int8BufferBuilder {
+ return &int8BufferBuilder{bufferBuilder: bufferBuilder{refCount: 1, mem: mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *int8BufferBuilder) AppendValues(v []int8) { b.Append(arrow.Int8Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *int8BufferBuilder) Values() []int8 { return arrow.Int8Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the int8 element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *int8BufferBuilder) Value(i int) int8 { return b.Values()[i] }
+
+// Len returns the number of int8 elements in the buffer.
+func (b *int8BufferBuilder) Len() int { return b.length / arrow.Int8SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *int8BufferBuilder) AppendValue(v int8) {
+ if b.capacity < b.length+arrow.Int8SizeBytes {
+ newCapacity := bitutil.NextPowerOf2(b.length + arrow.Int8SizeBytes)
+ b.resize(newCapacity)
+ }
+ arrow.Int8Traits.PutValue(b.bytes[b.length:], v)
+ b.length += arrow.Int8SizeBytes
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl
new file mode 100644
index 000000000..e859b5bff
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+{{range .In}}
+{{$TypeNamePrefix := .name}}
+{{if .Opt.BufferBuilder}}
+// {{$TypeNamePrefix}}BufferBuilder wraps bufferBuilder to provide {{.Type}}-typed access
+// to the underlying byte buffer.
+type {{$TypeNamePrefix}}BufferBuilder struct {
+ bufferBuilder
+}
+
+// new{{.Name}}BufferBuilder returns a {{$TypeNamePrefix}}BufferBuilder allocating from mem,
+// with an initial reference count of 1.
+func new{{.Name}}BufferBuilder(mem memory.Allocator) *{{$TypeNamePrefix}}BufferBuilder {
+ return &{{$TypeNamePrefix}}BufferBuilder{bufferBuilder:bufferBuilder{refCount: 1, mem:mem}}
+}
+
+// AppendValues appends the contents of v to the buffer, growing the buffer as needed.
+func (b *{{$TypeNamePrefix}}BufferBuilder) AppendValues(v []{{.Type}}) { b.Append(arrow.{{.Name}}Traits.CastToBytes(v)) }
+
+// Values returns a slice of length b.Len().
+// The slice is only valid for use until the next buffer modification. That is, until the next call
+// to Advance, Reset, Finish or any Append function. The slice aliases the buffer content at least until the next
+// buffer modification.
+func (b *{{$TypeNamePrefix}}BufferBuilder) Values() []{{.Type}} { return arrow.{{.Name}}Traits.CastFromBytes(b.Bytes()) }
+
+// Value returns the {{.Type}} element at the index i. Value will panic if i is negative or ≥ Len.
+func (b *{{$TypeNamePrefix}}BufferBuilder) Value(i int) {{.Type}} { return b.Values()[i] }
+
+// Len returns the number of {{.Type}} elements in the buffer.
+func (b *{{$TypeNamePrefix}}BufferBuilder) Len() int { return b.length/arrow.{{.Name}}SizeBytes }
+
+// AppendValue appends v to the buffer, growing the buffer as needed.
+func (b *{{$TypeNamePrefix}}BufferBuilder) AppendValue(v {{.Type}}) {
+ if b.capacity < b.length+arrow.{{.Name}}SizeBytes {
+ newCapacity := bitutil.NextPowerOf2(b.length + arrow.{{.Name}}SizeBytes)
+ b.resize(newCapacity)
+ }
+ arrow.{{.Name}}Traits.PutValue(b.bytes[b.length:], v)
+ b.length+=arrow.{{.Name}}SizeBytes
+}
+{{end}}
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go
new file mode 100644
index 000000000..2f15ac965
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go
@@ -0,0 +1,369 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+const (
+ // minBuilderCapacity is the smallest element capacity a builder allocates.
+ minBuilderCapacity = 1 << 5
+)
+
+// Builder provides an interface to build arrow arrays.
+type Builder interface {
+ // you can unmarshal a json array to add the values to a builder
+ json.Unmarshaler
+
+ // Type returns the datatype that this is building
+ Type() arrow.DataType
+
+ // Retain increases the reference count by 1.
+ // Retain may be called simultaneously from multiple goroutines.
+ Retain()
+
+ // Release decreases the reference count by 1.
+ Release()
+
+ // Len returns the number of elements in the array builder.
+ Len() int
+
+ // Cap returns the total number of elements that can be stored
+ // without allocating additional memory.
+ Cap() int
+
+ // NullN returns the number of null values in the array builder.
+ NullN() int
+
+ // AppendNull adds a new null value to the array being built.
+ AppendNull()
+
+ // AppendNulls adds new n null values to the array being built.
+ AppendNulls(n int)
+
+ // AppendEmptyValue adds a new zero value of the appropriate type
+ AppendEmptyValue()
+
+ // AppendEmptyValues adds new n zero values of the appropriate type
+ AppendEmptyValues(n int)
+
+ // AppendValueFromString adds a new value from a string. Inverse of array.ValueStr(i int) string
+ AppendValueFromString(string) error
+
+ // Reserve ensures there is enough space for appending n elements
+ // by checking the capacity and calling Resize if necessary.
+ Reserve(n int)
+
+ // Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+ // additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+ Resize(n int)
+
+ // NewArray creates a new array from the memory buffers used
+ // by the builder and resets the Builder so it can be used to build
+ // a new array.
+ NewArray() arrow.Array
+
+ // IsNull returns if a previously appended value at a given index is null or not.
+ IsNull(i int) bool
+
+ // SetNull sets the value at index i to null.
+ SetNull(i int)
+
+ // UnsafeAppendBoolToBitmap appends the validity of the next element
+ // without a capacity check; Reserve must be called first.
+ UnsafeAppendBoolToBitmap(bool)
+
+ // init and resize manage the validity bitmap allocation; unexported
+ // so only builders in this package implement the interface.
+ init(capacity int)
+ resize(newBits int, init func(int))
+
+ // UnmarshalOne decodes a single JSON token; Unmarshal consumes a JSON array.
+ UnmarshalOne(*json.Decoder) error
+ Unmarshal(*json.Decoder) error
+
+ // newData packages the builder's buffers as array data, resetting the builder.
+ newData() *Data
+}
+
+// builder provides common functionality for managing the validity bitmap (nulls) when building arrays.
+type builder struct {
+ refCount int64
+ mem memory.Allocator
+ nullBitmap *memory.Buffer
+ nulls int
+ length int
+ capacity int
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (b *builder) Retain() {
+ atomic.AddInt64(&b.refCount, 1)
+}
+
+// Len returns the number of elements in the array builder.
+func (b *builder) Len() int { return b.length }
+
+// Cap returns the total number of elements that can be stored without allocating additional memory.
+func (b *builder) Cap() int { return b.capacity }
+
+// NullN returns the number of null values in the array builder.
+func (b *builder) NullN() int { return b.nulls }
+
+// IsNull reports whether the previously appended element at index i is null.
+func (b *builder) IsNull(i int) bool {
+ return b.nullBitmap.Len() != 0 && bitutil.BitIsNotSet(b.nullBitmap.Bytes(), i)
+}
+
+// SetNull marks the element at index i as null; panics if i is out of range.
+func (b *builder) SetNull(i int) {
+ if i < 0 || i >= b.length {
+ panic("arrow/array: index out of range")
+ }
+ bitutil.ClearBit(b.nullBitmap.Bytes(), i)
+}
+
+// init allocates and zeroes the validity bitmap for capacity elements.
+func (b *builder) init(capacity int) {
+ toAlloc := bitutil.CeilByte(capacity) / 8
+ b.nullBitmap = memory.NewResizableBuffer(b.mem)
+ b.nullBitmap.Resize(toAlloc)
+ b.capacity = capacity
+ memory.Set(b.nullBitmap.Buf(), 0)
+}
+
+// reset releases the validity bitmap and clears all counters so the
+// builder can be reused.
+func (b *builder) reset() {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+
+ b.nulls = 0
+ b.length = 0
+ b.capacity = 0
+}
+
+// resize grows or shrinks the validity bitmap to hold newBits elements,
+// delegating to init on first use, zeroing newly-acquired bytes, and
+// recomputing length and null count when truncating.
+func (b *builder) resize(newBits int, init func(int)) {
+ if b.nullBitmap == nil {
+ init(newBits)
+ return
+ }
+
+ newBytesN := bitutil.CeilByte(newBits) / 8
+ oldBytesN := b.nullBitmap.Len()
+ b.nullBitmap.Resize(newBytesN)
+ b.capacity = newBits
+ if oldBytesN < newBytesN {
+ // TODO(sgc): necessary?
+ memory.Set(b.nullBitmap.Buf()[oldBytesN:], 0)
+ }
+ if newBits < b.length {
+ b.length = newBits
+ b.nulls = newBits - bitutil.CountSetBits(b.nullBitmap.Buf(), 0, newBits)
+ }
+}
+
+// reserve ensures capacity for elements more entries, growing to the next
+// power of two via the supplied resize callback when needed.
+func (b *builder) reserve(elements int, resize func(int)) {
+ if b.nullBitmap == nil {
+ b.nullBitmap = memory.NewResizableBuffer(b.mem)
+ }
+ if b.length+elements > b.capacity {
+ newCap := bitutil.NextPowerOf2(b.length + elements)
+ resize(newCap)
+ }
+}
+
+// unsafeAppendBoolsToBitmap appends the contents of valid to the validity bitmap.
+// As an optimization, if the valid slice is empty, the next length bits will be set to valid (not null).
+func (b *builder) unsafeAppendBoolsToBitmap(valid []bool, length int) {
+ if len(valid) == 0 {
+ b.unsafeSetValid(length)
+ return
+ }
+
+ // byteOffset/bitOffset track the write position; bitSet buffers the
+ // byte being modified and is flushed back each time a byte fills up.
+ byteOffset := b.length / 8
+ bitOffset := byte(b.length % 8)
+ nullBitmap := b.nullBitmap.Bytes()
+ bitSet := nullBitmap[byteOffset]
+
+ for _, v := range valid {
+ if bitOffset == 8 {
+ bitOffset = 0
+ nullBitmap[byteOffset] = bitSet
+ byteOffset++
+ bitSet = nullBitmap[byteOffset]
+ }
+
+ if v {
+ bitSet |= bitutil.BitMask[bitOffset]
+ } else {
+ bitSet &= bitutil.FlippedBitMask[bitOffset]
+ b.nulls++
+ }
+ bitOffset++
+ }
+
+ // flush the final partially-written byte
+ if bitOffset != 0 {
+ nullBitmap[byteOffset] = bitSet
+ }
+ b.length += len(valid)
+}
+
+// unsafeSetValid sets the next length bits to valid in the validity bitmap.
+func (b *builder) unsafeSetValid(length int) {
+ // set individual bits up to the next byte boundary...
+ padToByte := min(8-(b.length%8), length)
+ if padToByte == 8 {
+ padToByte = 0
+ }
+ bits := b.nullBitmap.Bytes()
+ for i := b.length; i < b.length+padToByte; i++ {
+ bitutil.SetBit(bits, i)
+ }
+
+ // ...then set whole bytes at once...
+ start := (b.length + padToByte) / 8
+ fastLength := (length - padToByte) / 8
+ memory.Set(bits[start:start+fastLength], 0xff)
+
+ newLength := b.length + length
+ // trailing bytes
+ for i := b.length + padToByte + (fastLength * 8); i < newLength; i++ {
+ bitutil.SetBit(bits, i)
+ }
+
+ b.length = newLength
+}
+
+// UnsafeAppendBoolToBitmap records the validity of one appended value
+// without growing the bitmap; the caller must have reserved space already.
+// Unset bits are null, so only a valid entry needs its bit set.
+func (b *builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	switch {
+	case isValid:
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	default:
+		b.nulls++
+	}
+	b.length++
+}
+
+// NewBuilder returns a Builder capable of building arrays of the given data
+// type, allocating through mem. It panics for data types with no builder
+// implementation, and for DECIMAL128/DECIMAL256 values whose dynamic type is
+// not the expected *arrow.Decimal{128,256}Type.
+func NewBuilder(mem memory.Allocator, dtype arrow.DataType) Builder {
+	// FIXME(sbinet): use a type switch on dtype instead?
+	switch dtype.ID() {
+	case arrow.NULL:
+		return NewNullBuilder(mem)
+	case arrow.BOOL:
+		return NewBooleanBuilder(mem)
+	case arrow.UINT8:
+		return NewUint8Builder(mem)
+	case arrow.INT8:
+		return NewInt8Builder(mem)
+	case arrow.UINT16:
+		return NewUint16Builder(mem)
+	case arrow.INT16:
+		return NewInt16Builder(mem)
+	case arrow.UINT32:
+		return NewUint32Builder(mem)
+	case arrow.INT32:
+		return NewInt32Builder(mem)
+	case arrow.UINT64:
+		return NewUint64Builder(mem)
+	case arrow.INT64:
+		return NewInt64Builder(mem)
+	case arrow.FLOAT16:
+		return NewFloat16Builder(mem)
+	case arrow.FLOAT32:
+		return NewFloat32Builder(mem)
+	case arrow.FLOAT64:
+		return NewFloat64Builder(mem)
+	case arrow.STRING:
+		return NewStringBuilder(mem)
+	case arrow.LARGE_STRING:
+		return NewLargeStringBuilder(mem)
+	case arrow.BINARY:
+		return NewBinaryBuilder(mem, arrow.BinaryTypes.Binary)
+	case arrow.LARGE_BINARY:
+		return NewBinaryBuilder(mem, arrow.BinaryTypes.LargeBinary)
+	case arrow.FIXED_SIZE_BINARY:
+		typ := dtype.(*arrow.FixedSizeBinaryType)
+		return NewFixedSizeBinaryBuilder(mem, typ)
+	case arrow.DATE32:
+		return NewDate32Builder(mem)
+	case arrow.DATE64:
+		return NewDate64Builder(mem)
+	case arrow.TIMESTAMP:
+		typ := dtype.(*arrow.TimestampType)
+		return NewTimestampBuilder(mem, typ)
+	case arrow.TIME32:
+		typ := dtype.(*arrow.Time32Type)
+		return NewTime32Builder(mem, typ)
+	case arrow.TIME64:
+		typ := dtype.(*arrow.Time64Type)
+		return NewTime64Builder(mem, typ)
+	case arrow.INTERVAL_MONTHS:
+		return NewMonthIntervalBuilder(mem)
+	case arrow.INTERVAL_DAY_TIME:
+		return NewDayTimeIntervalBuilder(mem)
+	case arrow.INTERVAL_MONTH_DAY_NANO:
+		return NewMonthDayNanoIntervalBuilder(mem)
+	case arrow.DECIMAL128:
+		// Falls through to the panic below when the concrete type does not
+		// match (unlike the other cases, which use an unchecked assertion).
+		if typ, ok := dtype.(*arrow.Decimal128Type); ok {
+			return NewDecimal128Builder(mem, typ)
+		}
+	case arrow.DECIMAL256:
+		if typ, ok := dtype.(*arrow.Decimal256Type); ok {
+			return NewDecimal256Builder(mem, typ)
+		}
+	case arrow.LIST:
+		typ := dtype.(*arrow.ListType)
+		return NewListBuilderWithField(mem, typ.ElemField())
+	case arrow.STRUCT:
+		typ := dtype.(*arrow.StructType)
+		return NewStructBuilder(mem, typ)
+	case arrow.SPARSE_UNION:
+		typ := dtype.(*arrow.SparseUnionType)
+		return NewSparseUnionBuilder(mem, typ)
+	case arrow.DENSE_UNION:
+		typ := dtype.(*arrow.DenseUnionType)
+		return NewDenseUnionBuilder(mem, typ)
+	case arrow.DICTIONARY:
+		typ := dtype.(*arrow.DictionaryType)
+		return NewDictionaryBuilder(mem, typ)
+	case arrow.LARGE_LIST:
+		typ := dtype.(*arrow.LargeListType)
+		return NewLargeListBuilderWithField(mem, typ.ElemField())
+	case arrow.MAP:
+		typ := dtype.(*arrow.MapType)
+		return NewMapBuilderWithType(mem, typ)
+	case arrow.LIST_VIEW:
+		typ := dtype.(*arrow.ListViewType)
+		return NewListViewBuilderWithField(mem, typ.ElemField())
+	case arrow.LARGE_LIST_VIEW:
+		typ := dtype.(*arrow.LargeListViewType)
+		return NewLargeListViewBuilderWithField(mem, typ.ElemField())
+	case arrow.EXTENSION:
+		typ := dtype.(arrow.ExtensionType)
+		bldr := NewExtensionBuilder(mem, typ)
+		// Extension types may supply their own builder wrapper.
+		if custom, ok := typ.(ExtensionBuilderWrapper); ok {
+			return custom.NewBuilder(bldr)
+		}
+		return bldr
+	case arrow.FIXED_SIZE_LIST:
+		typ := dtype.(*arrow.FixedSizeListType)
+		return NewFixedSizeListBuilder(mem, typ.Len(), typ.Elem())
+	case arrow.DURATION:
+		typ := dtype.(*arrow.DurationType)
+		return NewDurationBuilder(mem, typ)
+	case arrow.RUN_END_ENCODED:
+		typ := dtype.(*arrow.RunEndEncodedType)
+		return NewRunEndEncodedBuilder(mem, typ.RunEnds(), typ.Encoded())
+	}
+	panic(fmt.Errorf("arrow/array: unsupported builder for %T", dtype))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go
new file mode 100644
index 000000000..e70716bee
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go
@@ -0,0 +1,842 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/float16"
+ "github.com/apache/arrow/go/v14/internal/bitutils"
+)
+
+// RecordEqual reports whether the two provided records are equal.
+func RecordEqual(left, right arrow.Record) bool {
+	if left.NumCols() != right.NumCols() || left.NumRows() != right.NumRows() {
+		return false
+	}
+	for i := range left.Columns() {
+		if !Equal(left.Column(i), right.Column(i)) {
+			return false
+		}
+	}
+	return true
+}
+
+// RecordApproxEqual reports whether the two provided records are approximately equal.
+// For non-floating point columns, it is equivalent to RecordEqual.
+func RecordApproxEqual(left, right arrow.Record, opts ...EqualOption) bool {
+	if left.NumCols() != right.NumCols() || left.NumRows() != right.NumRows() {
+		return false
+	}
+	// Resolve the options once and reuse them for every column.
+	opt := newEqualOption(opts...)
+	for i := range left.Columns() {
+		if !arrayApproxEqual(left.Column(i), right.Column(i), opt) {
+			return false
+		}
+	}
+	return true
+}
+
+// helper function to evaluate a function on two chunked object having possibly different
+// chunk layouts. the function passed in will be called for each corresponding slice of the
+// two chunked arrays and if the function returns false it will end the loop early.
+func chunkedBinaryApply(left, right *arrow.Chunked, fn func(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool) {
+	var (
+		pos               int64
+		length            int64 = int64(left.Len())
+		leftIdx, rightIdx int   // index of the current chunk on each side
+		leftPos, rightPos int64 // position within the current chunk
+	)
+
+	for pos < length {
+		var cleft, cright arrow.Array
+		// Skip over exhausted (or zero-length) chunks on either side until
+		// both cursors point into a chunk with remaining elements.
+		for {
+			cleft, cright = left.Chunk(leftIdx), right.Chunk(rightIdx)
+			if leftPos == int64(cleft.Len()) {
+				leftPos = 0
+				leftIdx++
+				continue
+			}
+			if rightPos == int64(cright.Len()) {
+				rightPos = 0
+				rightIdx++
+				continue
+			}
+			break
+		}
+
+		// Advance by the largest span available in both current chunks.
+		sz := int64(min(cleft.Len()-int(leftPos), cright.Len()-int(rightPos)))
+		pos += sz
+		if !fn(cleft, leftPos, leftPos+sz, cright, rightPos, rightPos+sz) {
+			return
+		}
+
+		leftPos += sz
+		rightPos += sz
+	}
+}
+
+// ChunkedEqual reports whether two chunked arrays are equal regardless of their chunkings
+func ChunkedEqual(left, right *arrow.Chunked) bool {
+ switch {
+ case left == right:
+ return true
+ case left.Len() != right.Len():
+ return false
+ case left.NullN() != right.NullN():
+ return false
+ case !arrow.TypeEqual(left.DataType(), right.DataType()):
+ return false
+ }
+
+ var isequal bool = true
+ chunkedBinaryApply(left, right, func(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool {
+ isequal = SliceEqual(left, lbeg, lend, right, rbeg, rend)
+ return isequal
+ })
+
+ return isequal
+}
+
+// ChunkedApproxEqual reports whether two chunked arrays are approximately equal regardless of their chunkings
+// for non-floating point arrays, this is equivalent to ChunkedEqual
+func ChunkedApproxEqual(left, right *arrow.Chunked, opts ...EqualOption) bool {
+ switch {
+ case left == right:
+ return true
+ case left.Len() != right.Len():
+ return false
+ case left.NullN() != right.NullN():
+ return false
+ case !arrow.TypeEqual(left.DataType(), right.DataType()):
+ return false
+ }
+
+ var isequal bool
+ chunkedBinaryApply(left, right, func(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool {
+ isequal = SliceApproxEqual(left, lbeg, lend, right, rbeg, rend, opts...)
+ return isequal
+ })
+
+ return isequal
+}
+
+// TableEqual returns if the two tables have the same data in the same schema
+func TableEqual(left, right arrow.Table) bool {
+	if left.NumCols() != right.NumCols() || left.NumRows() != right.NumRows() {
+		return false
+	}
+	for i := int64(0); i < left.NumCols(); i++ {
+		lc, rc := left.Column(int(i)), right.Column(int(i))
+		// Both the field metadata and the column data must match.
+		if !lc.Field().Equal(rc.Field()) || !ChunkedEqual(lc.Data(), rc.Data()) {
+			return false
+		}
+	}
+	return true
+}
+
+// TableApproxEqual returns if the two tables have approximately equal data in
+// the same schema; for non-floating point columns this is equivalent to TableEqual.
+func TableApproxEqual(left, right arrow.Table, opts ...EqualOption) bool {
+	switch {
+	case left.NumCols() != right.NumCols():
+		return false
+	case left.NumRows() != right.NumRows():
+		return false
+	}
+
+	for i := 0; int64(i) < left.NumCols(); i++ {
+		lc := left.Column(i)
+		rc := right.Column(i)
+		if !lc.Field().Equal(rc.Field()) {
+			return false
+		}
+
+		if !ChunkedApproxEqual(lc.Data(), rc.Data(), opts...) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equal reports whether the two provided arrays are equal.
+// It first checks type, length, null count and null placement via
+// baseArrayEqual, then dispatches on the concrete array type to compare
+// the values. It panics on an unknown array type.
+func Equal(left, right arrow.Array) bool {
+	switch {
+	case !baseArrayEqual(left, right):
+		return false
+	case left.Len() == 0:
+		return true
+	case left.NullN() == left.Len():
+		return true
+	}
+
+	// at this point, we know both arrays have same type, same length, same number of nulls
+	// and nulls at the same place.
+	// compare the values.
+
+	switch l := left.(type) {
+	case *Null:
+		return true
+	case *Boolean:
+		r := right.(*Boolean)
+		return arrayEqualBoolean(l, r)
+	case *FixedSizeBinary:
+		r := right.(*FixedSizeBinary)
+		return arrayEqualFixedSizeBinary(l, r)
+	case *Binary:
+		r := right.(*Binary)
+		return arrayEqualBinary(l, r)
+	case *String:
+		r := right.(*String)
+		return arrayEqualString(l, r)
+	case *LargeBinary:
+		r := right.(*LargeBinary)
+		return arrayEqualLargeBinary(l, r)
+	case *LargeString:
+		r := right.(*LargeString)
+		return arrayEqualLargeString(l, r)
+	case *Int8:
+		r := right.(*Int8)
+		return arrayEqualInt8(l, r)
+	case *Int16:
+		r := right.(*Int16)
+		return arrayEqualInt16(l, r)
+	case *Int32:
+		r := right.(*Int32)
+		return arrayEqualInt32(l, r)
+	case *Int64:
+		r := right.(*Int64)
+		return arrayEqualInt64(l, r)
+	case *Uint8:
+		r := right.(*Uint8)
+		return arrayEqualUint8(l, r)
+	case *Uint16:
+		r := right.(*Uint16)
+		return arrayEqualUint16(l, r)
+	case *Uint32:
+		r := right.(*Uint32)
+		return arrayEqualUint32(l, r)
+	case *Uint64:
+		r := right.(*Uint64)
+		return arrayEqualUint64(l, r)
+	case *Float16:
+		r := right.(*Float16)
+		return arrayEqualFloat16(l, r)
+	case *Float32:
+		r := right.(*Float32)
+		return arrayEqualFloat32(l, r)
+	case *Float64:
+		r := right.(*Float64)
+		return arrayEqualFloat64(l, r)
+	case *Decimal128:
+		r := right.(*Decimal128)
+		return arrayEqualDecimal128(l, r)
+	case *Decimal256:
+		r := right.(*Decimal256)
+		return arrayEqualDecimal256(l, r)
+	case *Date32:
+		r := right.(*Date32)
+		return arrayEqualDate32(l, r)
+	case *Date64:
+		r := right.(*Date64)
+		return arrayEqualDate64(l, r)
+	case *Time32:
+		r := right.(*Time32)
+		return arrayEqualTime32(l, r)
+	case *Time64:
+		r := right.(*Time64)
+		return arrayEqualTime64(l, r)
+	case *Timestamp:
+		r := right.(*Timestamp)
+		return arrayEqualTimestamp(l, r)
+	case *List:
+		r := right.(*List)
+		return arrayEqualList(l, r)
+	case *LargeList:
+		r := right.(*LargeList)
+		return arrayEqualLargeList(l, r)
+	case *ListView:
+		r := right.(*ListView)
+		return arrayEqualListView(l, r)
+	case *LargeListView:
+		r := right.(*LargeListView)
+		return arrayEqualLargeListView(l, r)
+	case *FixedSizeList:
+		r := right.(*FixedSizeList)
+		return arrayEqualFixedSizeList(l, r)
+	case *Struct:
+		r := right.(*Struct)
+		return arrayEqualStruct(l, r)
+	case *MonthInterval:
+		r := right.(*MonthInterval)
+		return arrayEqualMonthInterval(l, r)
+	case *DayTimeInterval:
+		r := right.(*DayTimeInterval)
+		return arrayEqualDayTimeInterval(l, r)
+	case *MonthDayNanoInterval:
+		r := right.(*MonthDayNanoInterval)
+		return arrayEqualMonthDayNanoInterval(l, r)
+	case *Duration:
+		r := right.(*Duration)
+		return arrayEqualDuration(l, r)
+	case *Map:
+		r := right.(*Map)
+		return arrayEqualMap(l, r)
+	case ExtensionArray:
+		r := right.(ExtensionArray)
+		return arrayEqualExtension(l, r)
+	case *Dictionary:
+		r := right.(*Dictionary)
+		return arrayEqualDict(l, r)
+	case *SparseUnion:
+		r := right.(*SparseUnion)
+		return arraySparseUnionEqual(l, r)
+	case *DenseUnion:
+		r := right.(*DenseUnion)
+		return arrayDenseUnionEqual(l, r)
+	case *RunEndEncoded:
+		r := right.(*RunEndEncoded)
+		return arrayRunEndEncodedEqual(l, r)
+	default:
+		panic(fmt.Errorf("arrow/array: unknown array type %T", l))
+	}
+}
+
+// SliceEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are equal.
+func SliceEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool {
+	ls := NewSlice(left, lbeg, lend)
+	rs := NewSlice(right, rbeg, rend)
+	defer ls.Release()
+	defer rs.Release()
+	return Equal(ls, rs)
+}
+
+// SliceApproxEqual reports whether slices left[lbeg:lend] and right[rbeg:rend]
+// are approximately equal, resolving the functional options once.
+func SliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opts ...EqualOption) bool {
+	return sliceApproxEqual(left, lbeg, lend, right, rbeg, rend, newEqualOption(opts...))
+}
+
+// sliceApproxEqual is the resolved-option form of SliceApproxEqual.
+func sliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opt equalOption) bool {
+	ls := NewSlice(left, lbeg, lend)
+	rs := NewSlice(right, rbeg, rend)
+	defer ls.Release()
+	defer rs.Release()
+	return arrayApproxEqual(ls, rs, opt)
+}
+
+// defaultAbsoluteTolerance is the |v1-v2| tolerance applied when the caller
+// does not supply WithAbsTolerance.
+const defaultAbsoluteTolerance = 1e-5
+
+// equalOption holds the resolved comparison settings built from the
+// functional EqualOption values supplied by callers.
+type equalOption struct {
+	atol             float64 // absolute tolerance
+	nansEq           bool    // whether NaNs are considered equal.
+	unorderedMapKeys bool    // whether maps are allowed to have different entries order
+}
+
+// f16 reports whether two float16 values are within the configured absolute
+// tolerance of each other, optionally treating NaN as equal to NaN.
+func (eq equalOption) f16(f1, f2 float16.Num) bool {
+	v1, v2 := float64(f1.Float32()), float64(f2.Float32())
+	if math.Abs(v1-v2) <= eq.atol {
+		return true
+	}
+	return eq.nansEq && math.IsNaN(v1) && math.IsNaN(v2)
+}
+
+// f32 reports whether two float32 values are equal or within the configured
+// absolute tolerance, optionally treating NaN as equal to NaN.
+func (eq equalOption) f32(f1, f2 float32) bool {
+	v1, v2 := float64(f1), float64(f2)
+	if v1 == v2 || math.Abs(v1-v2) <= eq.atol {
+		return true
+	}
+	return eq.nansEq && math.IsNaN(v1) && math.IsNaN(v2)
+}
+
+// f64 reports whether two float64 values are equal or within the configured
+// absolute tolerance, optionally treating NaN as equal to NaN.
+func (eq equalOption) f64(v1, v2 float64) bool {
+	if v1 == v2 || math.Abs(v1-v2) <= eq.atol {
+		return true
+	}
+	return eq.nansEq && math.IsNaN(v1) && math.IsNaN(v2)
+}
+
+// newEqualOption resolves the supplied functional options into an equalOption,
+// starting from the defaults (atol = defaultAbsoluteTolerance, NaNs unequal,
+// ordered map keys).
+func newEqualOption(opts ...EqualOption) equalOption {
+	eq := equalOption{atol: defaultAbsoluteTolerance}
+	for _, apply := range opts {
+		apply(&eq)
+	}
+	return eq
+}
+
+// EqualOption is a functional option type used to configure how Records and Arrays are compared.
+type EqualOption func(*equalOption)
+
+// WithNaNsEqual configures the comparison functions so that NaNs are considered equal.
+// The default is false: NaN != NaN.
+func WithNaNsEqual(v bool) EqualOption {
+	return func(o *equalOption) {
+		o.nansEq = v
+	}
+}
+
+// WithAbsTolerance configures the comparison functions so that 2 floating point values
+// v1 and v2 are considered equal if |v1-v2| <= atol.
+// The default is defaultAbsoluteTolerance (1e-5).
+func WithAbsTolerance(atol float64) EqualOption {
+	return func(o *equalOption) {
+		o.atol = atol
+	}
+}
+
+// WithUnorderedMapKeys configures the comparison functions so that Map with different entries order are considered equal.
+// Only the approximate-equality paths consult this flag.
+func WithUnorderedMapKeys(v bool) EqualOption {
+	return func(o *equalOption) {
+		o.unorderedMapKeys = v
+	}
+}
+
+// ApproxEqual reports whether the two provided arrays are approximately equal.
+// For non-floating point arrays, it is equivalent to Equal.
+func ApproxEqual(left, right arrow.Array, opts ...EqualOption) bool {
+	return arrayApproxEqual(left, right, newEqualOption(opts...))
+}
+
+// arrayApproxEqual is the resolved-option core of ApproxEqual. Floating point
+// arrays (and nested arrays containing them) are compared through opt; all
+// other types fall back to the exact comparison helpers used by Equal.
+// It panics on an unknown array type.
+func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool {
+	switch {
+	case !baseArrayEqual(left, right):
+		return false
+	case left.Len() == 0:
+		return true
+	case left.NullN() == left.Len():
+		return true
+	}
+
+	// at this point, we know both arrays have same type, same length, same number of nulls
+	// and nulls at the same place.
+	// compare the values.
+
+	switch l := left.(type) {
+	case *Null:
+		return true
+	case *Boolean:
+		r := right.(*Boolean)
+		return arrayEqualBoolean(l, r)
+	case *FixedSizeBinary:
+		r := right.(*FixedSizeBinary)
+		return arrayEqualFixedSizeBinary(l, r)
+	case *Binary:
+		r := right.(*Binary)
+		return arrayEqualBinary(l, r)
+	case *String:
+		r := right.(*String)
+		return arrayEqualString(l, r)
+	case *LargeBinary:
+		r := right.(*LargeBinary)
+		return arrayEqualLargeBinary(l, r)
+	case *LargeString:
+		r := right.(*LargeString)
+		return arrayEqualLargeString(l, r)
+	case *Int8:
+		r := right.(*Int8)
+		return arrayEqualInt8(l, r)
+	case *Int16:
+		r := right.(*Int16)
+		return arrayEqualInt16(l, r)
+	case *Int32:
+		r := right.(*Int32)
+		return arrayEqualInt32(l, r)
+	case *Int64:
+		r := right.(*Int64)
+		return arrayEqualInt64(l, r)
+	case *Uint8:
+		r := right.(*Uint8)
+		return arrayEqualUint8(l, r)
+	case *Uint16:
+		r := right.(*Uint16)
+		return arrayEqualUint16(l, r)
+	case *Uint32:
+		r := right.(*Uint32)
+		return arrayEqualUint32(l, r)
+	case *Uint64:
+		r := right.(*Uint64)
+		return arrayEqualUint64(l, r)
+	case *Float16:
+		r := right.(*Float16)
+		return arrayApproxEqualFloat16(l, r, opt)
+	case *Float32:
+		r := right.(*Float32)
+		return arrayApproxEqualFloat32(l, r, opt)
+	case *Float64:
+		r := right.(*Float64)
+		return arrayApproxEqualFloat64(l, r, opt)
+	case *Decimal128:
+		r := right.(*Decimal128)
+		return arrayEqualDecimal128(l, r)
+	case *Decimal256:
+		r := right.(*Decimal256)
+		return arrayEqualDecimal256(l, r)
+	case *Date32:
+		r := right.(*Date32)
+		return arrayEqualDate32(l, r)
+	case *Date64:
+		r := right.(*Date64)
+		return arrayEqualDate64(l, r)
+	case *Time32:
+		r := right.(*Time32)
+		return arrayEqualTime32(l, r)
+	case *Time64:
+		r := right.(*Time64)
+		return arrayEqualTime64(l, r)
+	case *Timestamp:
+		r := right.(*Timestamp)
+		return arrayEqualTimestamp(l, r)
+	case *List:
+		r := right.(*List)
+		return arrayApproxEqualList(l, r, opt)
+	case *LargeList:
+		r := right.(*LargeList)
+		return arrayApproxEqualLargeList(l, r, opt)
+	case *ListView:
+		r := right.(*ListView)
+		return arrayApproxEqualListView(l, r, opt)
+	case *LargeListView:
+		r := right.(*LargeListView)
+		return arrayApproxEqualLargeListView(l, r, opt)
+	case *FixedSizeList:
+		r := right.(*FixedSizeList)
+		return arrayApproxEqualFixedSizeList(l, r, opt)
+	case *Struct:
+		r := right.(*Struct)
+		return arrayApproxEqualStruct(l, r, opt)
+	case *MonthInterval:
+		r := right.(*MonthInterval)
+		return arrayEqualMonthInterval(l, r)
+	case *DayTimeInterval:
+		r := right.(*DayTimeInterval)
+		return arrayEqualDayTimeInterval(l, r)
+	case *MonthDayNanoInterval:
+		r := right.(*MonthDayNanoInterval)
+		return arrayEqualMonthDayNanoInterval(l, r)
+	case *Duration:
+		r := right.(*Duration)
+		return arrayEqualDuration(l, r)
+	case *Map:
+		r := right.(*Map)
+		// Order-insensitive comparison only when explicitly requested;
+		// otherwise maps compare as their underlying list of entries.
+		if opt.unorderedMapKeys {
+			return arrayApproxEqualMap(l, r, opt)
+		}
+		return arrayApproxEqualList(l.List, r.List, opt)
+	case *Dictionary:
+		r := right.(*Dictionary)
+		return arrayApproxEqualDict(l, r, opt)
+	case ExtensionArray:
+		r := right.(ExtensionArray)
+		return arrayApproxEqualExtension(l, r, opt)
+	case *SparseUnion:
+		r := right.(*SparseUnion)
+		return arraySparseUnionApproxEqual(l, r, opt)
+	case *DenseUnion:
+		r := right.(*DenseUnion)
+		return arrayDenseUnionApproxEqual(l, r, opt)
+	case *RunEndEncoded:
+		r := right.(*RunEndEncoded)
+		return arrayRunEndEncodedApproxEqual(l, r, opt)
+	default:
+		panic(fmt.Errorf("arrow/array: unknown array type %T", l))
+	}
+}
+
+// baseArrayEqual performs the type/length/null-placement checks shared by
+// Equal and arrayApproxEqual before any per-value comparison.
+func baseArrayEqual(left, right arrow.Array) bool {
+	if left.Len() != right.Len() || left.NullN() != right.NullN() {
+		return false
+	}
+	// We do not check for metadata as in the C++ implementation.
+	if !arrow.TypeEqual(left.DataType(), right.DataType()) {
+		return false
+	}
+	return validityBitmapEqual(left, right)
+}
+
+// validityBitmapEqual reports whether both arrays are null at exactly the
+// same positions.
+// TODO(alexandreyc): make it faster by comparing byte slices of the validity bitmap?
+func validityBitmapEqual(left, right arrow.Array) bool {
+	if left.Len() != right.Len() {
+		return false
+	}
+	for i, n := 0, left.Len(); i < n; i++ {
+		if left.IsNull(i) != right.IsNull(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualFloat16 compares the non-null values of two float16 arrays
+// using the tolerance settings in opt (null placement was checked earlier).
+func arrayApproxEqualFloat16(left, right *Float16, opt equalOption) bool {
+	for i, n := 0, left.Len(); i < n; i++ {
+		if !left.IsNull(i) && !opt.f16(left.Value(i), right.Value(i)) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualFloat32 compares the non-null values of two float32 arrays
+// using the tolerance settings in opt (null placement was checked earlier).
+func arrayApproxEqualFloat32(left, right *Float32, opt equalOption) bool {
+	for i, n := 0, left.Len(); i < n; i++ {
+		if !left.IsNull(i) && !opt.f32(left.Value(i), right.Value(i)) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualFloat64 compares the non-null values of two float64 arrays
+// using the tolerance settings in opt (null placement was checked earlier).
+func arrayApproxEqualFloat64(left, right *Float64, opt equalOption) bool {
+	for i, n := 0, left.Len(); i < n; i++ {
+		if !left.IsNull(i) && !opt.f64(left.Value(i), right.Value(i)) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualList compares the non-null elements of two list arrays,
+// materializing each pair of list values and releasing them promptly.
+func arrayApproxEqualList(left, right *List, opt equalOption) bool {
+	valuesEq := func(i int) bool {
+		lv := left.newListValue(i)
+		defer lv.Release()
+		rv := right.newListValue(i)
+		defer rv.Release()
+		return arrayApproxEqual(lv, rv, opt)
+	}
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !valuesEq(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualLargeList compares the non-null elements of two large-list
+// arrays, materializing each pair of list values and releasing them promptly.
+func arrayApproxEqualLargeList(left, right *LargeList, opt equalOption) bool {
+	valuesEq := func(i int) bool {
+		lv := left.newListValue(i)
+		defer lv.Release()
+		rv := right.newListValue(i)
+		defer rv.Release()
+		return arrayApproxEqual(lv, rv, opt)
+	}
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !valuesEq(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualListView compares the non-null elements of two list-view
+// arrays, materializing each pair of list values and releasing them promptly.
+func arrayApproxEqualListView(left, right *ListView, opt equalOption) bool {
+	valuesEq := func(i int) bool {
+		lv := left.newListValue(i)
+		defer lv.Release()
+		rv := right.newListValue(i)
+		defer rv.Release()
+		return arrayApproxEqual(lv, rv, opt)
+	}
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !valuesEq(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualLargeListView compares the non-null elements of two
+// large-list-view arrays, materializing each pair of list values and
+// releasing them promptly.
+func arrayApproxEqualLargeListView(left, right *LargeListView, opt equalOption) bool {
+	valuesEq := func(i int) bool {
+		lv := left.newListValue(i)
+		defer lv.Release()
+		rv := right.newListValue(i)
+		defer rv.Release()
+		return arrayApproxEqual(lv, rv, opt)
+	}
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !valuesEq(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualFixedSizeList compares the non-null elements of two
+// fixed-size-list arrays, materializing each pair of list values and
+// releasing them promptly.
+func arrayApproxEqualFixedSizeList(left, right *FixedSizeList, opt equalOption) bool {
+	valuesEq := func(i int) bool {
+		lv := left.newListValue(i)
+		defer lv.Release()
+		rv := right.newListValue(i)
+		defer rv.Release()
+		return arrayApproxEqual(lv, rv, opt)
+	}
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !valuesEq(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualStruct compares the children of two struct arrays over the
+// runs of valid (set) bits in the left array's validity bitmap.
+func arrayApproxEqualStruct(left, right *Struct, opt equalOption) bool {
+	err := bitutils.VisitSetBitRuns(
+		left.NullBitmapBytes(),
+		int64(left.Offset()), int64(left.Len()),
+		approxEqualStructRun(left, right, opt),
+	)
+	return err == nil
+}
+
+// approxEqualStructRun returns a visitor comparing every child field of left
+// and right over [pos, pos+length); it reports arrow.ErrInvalid on the first
+// mismatch, which stops the bitmap-run visit.
+func approxEqualStructRun(left, right *Struct, opt equalOption) bitutils.VisitFn {
+	return func(pos, length int64) error {
+		end := pos + length
+		for i := range left.fields {
+			if !sliceApproxEqual(left.fields[i], pos, end, right.fields[i], pos, end, opt) {
+				return arrow.ErrInvalid
+			}
+		}
+		return nil
+	}
+}
+
+// arrayApproxEqualMap doesn't care about the order of keys (in Go map traversal order is undefined)
+// The *Struct entry arrays created by newListValue are released by
+// arrayApproxEqualSingleMapEntry, so no cleanup is needed here.
+func arrayApproxEqualMap(left, right *Map, opt equalOption) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if !arrayApproxEqualSingleMapEntry(left.newListValue(i).(*Struct), right.newListValue(i).(*Struct), opt) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayApproxEqualSingleMapEntry is a helper function that checks if a single entry pair is approx equal.
+// Basically, it doesn't care about key order.
+// structs passed will be released
+//
+// The match is quadratic: for every valid left entry it scans the right
+// entries for one with an approx-equal key (field 0) and value (field 1),
+// consuming each right entry at most once via the `used` set.
+func arrayApproxEqualSingleMapEntry(left, right *Struct, opt equalOption) bool {
+	defer left.Release()
+	defer right.Release()
+
+	// we don't compare the validity bitmap, but we want other checks from baseArrayEqual
+	switch {
+	case left.Len() != right.Len():
+		return false
+	case left.NullN() != right.NullN():
+		return false
+	case !arrow.TypeEqual(left.DataType(), right.DataType()): // We do not check for metadata as in the C++ implementation.
+		return false
+	case left.NullN() == left.Len():
+		return true
+	}
+
+	// used marks right-side entries already matched (or null), so each one
+	// can satisfy at most one left-side entry.
+	used := make(map[int]bool, right.Len())
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+
+		found := false
+		lBeg, lEnd := int64(i), int64(i+1)
+		for j := 0; j < right.Len(); j++ {
+			if used[j] {
+				continue
+			}
+			if right.IsNull(j) {
+				used[j] = true
+				continue
+			}
+
+			rBeg, rEnd := int64(j), int64(j+1)
+
+			// check keys (field 0)
+			if !sliceApproxEqual(left.Field(0), lBeg, lEnd, right.Field(0), rBeg, rEnd, opt) {
+				continue
+			}
+
+			// only now check the values
+			if sliceApproxEqual(left.Field(1), lBeg, lEnd, right.Field(1), rBeg, rEnd, opt) {
+				found = true
+				used[j] = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+
+	// every right entry must have been consumed (matched or null).
+	return len(used) == right.Len()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go
new file mode 100644
index 000000000..9d815023c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go
@@ -0,0 +1,910 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/encoded"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/bitutils"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+// Concatenate creates a new arrow.Array which is the concatenation of the
+// passed in arrays. Returns nil if an error is encountered.
+//
+// The passed in arrays still need to be released manually, and will not be
+// released by this function.
+func Concatenate(arrs []arrow.Array, mem memory.Allocator) (result arrow.Array, err error) {
+ if len(arrs) == 0 {
+ return nil, errors.New("array/concat: must pass at least one array")
+ }
+
+ // gather Data of inputs
+ data := make([]arrow.ArrayData, len(arrs))
+ for i, ar := range arrs {
+ if !arrow.TypeEqual(ar.DataType(), arrs[0].DataType()) {
+ return nil, fmt.Errorf("arrays to be concatenated must be identically typed, but %s and %s were encountered",
+ arrs[0].DataType(), ar.DataType())
+ }
+ data[i] = ar.Data()
+ }
+
+ out, err := concat(data, mem)
+ if err != nil {
+ return nil, err
+ }
+
+ defer out.Release()
+ return MakeFromData(out), nil
+}
+
+// simple struct to hold ranges
+type rng struct {
+ offset, len int
+}
+
+// simple bitmap struct to reference a specific slice of a bitmap where the range
+// offset and length are in bits
+type bitmap struct {
+ data []byte
+ rng rng
+}
+
+// gather up the bitmaps from the passed in data objects
+func gatherBitmaps(data []arrow.ArrayData, idx int) []bitmap {
+ out := make([]bitmap, len(data))
+ for i, d := range data {
+ if d.Buffers()[idx] != nil {
+ out[i].data = d.Buffers()[idx].Bytes()
+ }
+ out[i].rng.offset = d.Offset()
+ out[i].rng.len = d.Len()
+ }
+ return out
+}
+
+// gatherFixedBuffers gathers up the buffer objects of the given index, specifically
+// returning only the slices of the buffers which are relevant to the passed in arrays
+// in case they are themselves slices of other arrays. nil buffers are ignored and not
+// in the output slice.
+func gatherFixedBuffers(data []arrow.ArrayData, idx, byteWidth int) []*memory.Buffer {
+ out := make([]*memory.Buffer, 0, len(data))
+ for _, d := range data {
+ buf := d.Buffers()[idx]
+ if buf == nil {
+ continue
+ }
+
+ out = append(out, memory.NewBufferBytes(buf.Bytes()[d.Offset()*byteWidth:(d.Offset()+d.Len())*byteWidth]))
+ }
+ return out
+}
+
+// gatherBuffersFixedWidthType is like gatherFixedBuffers, but uses a datatype to determine the size
+// to use for determining the byte slice rather than a passed in bytewidth.
+func gatherBuffersFixedWidthType(data []arrow.ArrayData, idx int, fixed arrow.FixedWidthDataType) []*memory.Buffer {
+ return gatherFixedBuffers(data, idx, fixed.BitWidth()/8)
+}
+
+// gatherBufferRanges requires that len(ranges) == len(data) and returns a list of buffers
+// which represent the corresponding range of each buffer in the specified index of each
+// data object.
+func gatherBufferRanges(data []arrow.ArrayData, idx int, ranges []rng) []*memory.Buffer {
+ out := make([]*memory.Buffer, 0, len(data))
+ for i, d := range data {
+ buf := d.Buffers()[idx]
+ if buf == nil {
+ debug.Assert(ranges[i].len == 0, "misaligned buffer value ranges")
+ continue
+ }
+
+ out = append(out, memory.NewBufferBytes(buf.Bytes()[ranges[i].offset:ranges[i].offset+ranges[i].len]))
+ }
+ return out
+}
+
+// gatherChildren gathers the children data objects for child of index idx for all of the data objects.
+func gatherChildren(data []arrow.ArrayData, idx int) []arrow.ArrayData {
+ return gatherChildrenMultiplier(data, idx, 1)
+}
+
+// gatherChildrenMultiplier gathers the full data slice of the underlying values from the children data objects
+// such as the values data for a list array so that it can return a slice of the buffer for a given
+// index into the children.
+func gatherChildrenMultiplier(data []arrow.ArrayData, idx, multiplier int) []arrow.ArrayData {
+ out := make([]arrow.ArrayData, len(data))
+ for i, d := range data {
+ out[i] = NewSliceData(d.Children()[idx], int64(d.Offset()*multiplier), int64(d.Offset()+d.Len())*int64(multiplier))
+ }
+ return out
+}
+
+// gatherChildrenRanges returns a slice of Data objects which each represent slices of the given ranges from the
+// child in the specified index from each data object.
+func gatherChildrenRanges(data []arrow.ArrayData, idx int, ranges []rng) []arrow.ArrayData {
+ debug.Assert(len(data) == len(ranges), "mismatched children ranges for concat")
+ out := make([]arrow.ArrayData, len(data))
+ for i, d := range data {
+ out[i] = NewSliceData(d.Children()[idx], int64(ranges[i].offset), int64(ranges[i].offset+ranges[i].len))
+ }
+ return out
+}
+
+// creates a single contiguous buffer which contains the concatenation of all of the passed
+// in buffer objects.
+func concatBuffers(bufs []*memory.Buffer, mem memory.Allocator) *memory.Buffer {
+ outLen := 0
+ for _, b := range bufs {
+ outLen += b.Len()
+ }
+ out := memory.NewResizableBuffer(mem)
+ out.Resize(outLen)
+
+ data := out.Bytes()
+ for _, b := range bufs {
+ copy(data, b.Bytes())
+ data = data[b.Len():]
+ }
+ return out
+}
+
+func handle32BitOffsets(outLen int, buffers []*memory.Buffer, out *memory.Buffer) (*memory.Buffer, []rng, error) {
+ dst := arrow.Int32Traits.CastFromBytes(out.Bytes())
+ valuesRanges := make([]rng, len(buffers))
+ nextOffset := int32(0)
+ nextElem := int(0)
+ for i, b := range buffers {
+ if b.Len() == 0 {
+ valuesRanges[i].offset = 0
+ valuesRanges[i].len = 0
+ continue
+ }
+
+ // when we gather our buffers, we sliced off the last offset from the buffer
+ // so that we could count the lengths accurately
+ src := arrow.Int32Traits.CastFromBytes(b.Bytes())
+ valuesRanges[i].offset = int(src[0])
+ // expand our slice to see that final offset
+ expand := src[:len(src)+1]
+ // compute the length of this range by taking the final offset and subtracting where we started.
+ valuesRanges[i].len = int(expand[len(src)]) - valuesRanges[i].offset
+
+ if nextOffset > math.MaxInt32-int32(valuesRanges[i].len) {
+ return nil, nil, errors.New("offset overflow while concatenating arrays")
+ }
+
+ // adjust each offset by the difference between our last ending point and our starting point
+ adj := nextOffset - src[0]
+ for j, o := range src {
+ dst[nextElem+j] = adj + o
+ }
+
+ // the next index for an element in the output buffer
+ nextElem += b.Len() / arrow.Int32SizeBytes
+ // update our offset counter to be the total current length of our output
+ nextOffset += int32(valuesRanges[i].len)
+ }
+
+ // final offset should point to the end of the data
+ dst[outLen] = nextOffset
+ return out, valuesRanges, nil
+}
+
+func unifyDictionaries(mem memory.Allocator, data []arrow.ArrayData, dt *arrow.DictionaryType) ([]*memory.Buffer, arrow.Array, error) {
+ unifier, err := NewDictionaryUnifier(mem, dt.ValueType)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer unifier.Release()
+
+ newLookup := make([]*memory.Buffer, len(data))
+ for i, d := range data {
+ dictArr := MakeFromData(d.Dictionary())
+ defer dictArr.Release()
+ newLookup[i], err = unifier.UnifyAndTranspose(dictArr)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ unified, err := unifier.GetResultWithIndexType(dt.IndexType)
+ if err != nil {
+ for _, b := range newLookup {
+ b.Release()
+ }
+ return nil, nil, err
+ }
+ return newLookup, unified, nil
+}
+
+func concatDictIndices(mem memory.Allocator, data []arrow.ArrayData, idxType arrow.FixedWidthDataType, transpositions []*memory.Buffer) (out *memory.Buffer, err error) {
+ defer func() {
+ if err != nil && out != nil {
+ out.Release()
+ out = nil
+ }
+ }()
+
+ idxWidth := idxType.BitWidth() / 8
+ outLen := 0
+ for i, d := range data {
+ outLen += d.Len()
+ defer transpositions[i].Release()
+ }
+
+ out = memory.NewResizableBuffer(mem)
+ out.Resize(outLen * idxWidth)
+
+ outData := out.Bytes()
+ for i, d := range data {
+ transposeMap := arrow.Int32Traits.CastFromBytes(transpositions[i].Bytes())
+ src := d.Buffers()[1].Bytes()
+ if d.Buffers()[0] == nil {
+ if err = utils.TransposeIntsBuffers(idxType, idxType, src, outData, d.Offset(), 0, d.Len(), transposeMap); err != nil {
+ return
+ }
+ } else {
+ rdr := bitutils.NewBitRunReader(d.Buffers()[0].Bytes(), int64(d.Offset()), int64(d.Len()))
+ pos := 0
+ for {
+ run := rdr.NextRun()
+ if run.Len == 0 {
+ break
+ }
+
+ if run.Set {
+ err = utils.TransposeIntsBuffers(idxType, idxType, src, outData, d.Offset()+pos, pos, int(run.Len), transposeMap)
+ if err != nil {
+ return
+ }
+ } else {
+ memory.Set(outData[pos:pos+(int(run.Len)*idxWidth)], 0x00)
+ }
+
+ pos += int(run.Len)
+ }
+ }
+ outData = outData[d.Len()*idxWidth:]
+ }
+ return
+}
+
+func handle64BitOffsets(outLen int, buffers []*memory.Buffer, out *memory.Buffer) (*memory.Buffer, []rng, error) {
+ dst := arrow.Int64Traits.CastFromBytes(out.Bytes())
+ valuesRanges := make([]rng, len(buffers))
+ nextOffset := int64(0)
+ nextElem := int(0)
+ for i, b := range buffers {
+ if b.Len() == 0 {
+ valuesRanges[i].offset = 0
+ valuesRanges[i].len = 0
+ continue
+ }
+
+ // when we gather our buffers, we sliced off the last offset from the buffer
+ // so that we could count the lengths accurately
+ src := arrow.Int64Traits.CastFromBytes(b.Bytes())
+ valuesRanges[i].offset = int(src[0])
+ // expand our slice to see that final offset
+ expand := src[:len(src)+1]
+ // compute the length of this range by taking the final offset and subtracting where we started.
+ valuesRanges[i].len = int(expand[len(src)]) - valuesRanges[i].offset
+
+ if nextOffset > math.MaxInt64-int64(valuesRanges[i].len) {
+ return nil, nil, errors.New("offset overflow while concatenating arrays")
+ }
+
+ // adjust each offset by the difference between our last ending point and our starting point
+ adj := nextOffset - src[0]
+ for j, o := range src {
+ dst[nextElem+j] = adj + o
+ }
+
+ // the next index for an element in the output buffer
+ nextElem += b.Len() / arrow.Int64SizeBytes
+ // update our offset counter to be the total current length of our output
+ nextOffset += int64(valuesRanges[i].len)
+ }
+
+ // final offset should point to the end of the data
+ dst[outLen] = nextOffset
+ return out, valuesRanges, nil
+}
+
+// concatOffsets creates a single offset buffer which represents the concatenation of all of the
+// offsets buffers, adjusting the offsets appropriately to their new relative locations.
+//
+// It also returns the list of ranges that need to be fetched for the corresponding value buffers
+// to construct the final concatenated value buffer.
+func concatOffsets(buffers []*memory.Buffer, byteWidth int, mem memory.Allocator) (*memory.Buffer, []rng, error) {
+ outLen := 0
+ for _, b := range buffers {
+ outLen += b.Len() / byteWidth
+ }
+
+ out := memory.NewResizableBuffer(mem)
+ out.Resize(byteWidth * (outLen + 1))
+
+ switch byteWidth {
+ case arrow.Int64SizeBytes:
+ return handle64BitOffsets(outLen, buffers, out)
+ default:
+ return handle32BitOffsets(outLen, buffers, out)
+ }
+}
+
+func sumArraySizes(data []arrow.ArrayData) int {
+ outSize := 0
+ for _, arr := range data {
+ outSize += arr.Len()
+ }
+ return outSize
+}
+
+func getListViewBufferValues[T int32 | int64](data arrow.ArrayData, i int) []T {
+ bytes := data.Buffers()[i].Bytes()
+ base := (*T)(unsafe.Pointer(&bytes[0]))
+ ret := unsafe.Slice(base, data.Offset()+data.Len())
+ return ret[data.Offset():]
+}
+
+func putListViewOffsets32(in arrow.ArrayData, displacement int32, out *memory.Buffer, outOff int) {
+ debug.Assert(in.DataType().ID() == arrow.LIST_VIEW, "putListViewOffsets32: expected LIST_VIEW data")
+ inOff, inLen := in.Offset(), in.Len()
+ if inLen == 0 {
+ return
+ }
+ bitmap := in.Buffers()[0]
+ srcOffsets := getListViewBufferValues[int32](in, 1)
+ srcSizes := getListViewBufferValues[int32](in, 2)
+ isValidAndNonEmpty := func(i int) bool {
+ return (bitmap == nil || bitutil.BitIsSet(bitmap.Bytes(), inOff+i)) && srcSizes[i] > 0
+ }
+
+ dstOffsets := arrow.Int32Traits.CastFromBytes(out.Bytes())
+ for i, offset := range srcOffsets {
+ if isValidAndNonEmpty(i) {
+ // This is guaranteed by RangeOfValuesUsed returning the smallest offset
+ // of valid and non-empty list-views.
+ debug.Assert(offset+displacement >= 0, "putListViewOffsets32: offset underflow while concatenating arrays")
+ dstOffsets[outOff+i] = offset + displacement
+ } else {
+ dstOffsets[outOff+i] = 0
+ }
+ }
+}
+
+func putListViewOffsets64(in arrow.ArrayData, displacement int64, out *memory.Buffer, outOff int) {
+ debug.Assert(in.DataType().ID() == arrow.LARGE_LIST_VIEW, "putListViewOffsets64: expected LARGE_LIST_VIEW data")
+ inOff, inLen := in.Offset(), in.Len()
+ if inLen == 0 {
+ return
+ }
+ bitmap := in.Buffers()[0]
+ srcOffsets := getListViewBufferValues[int64](in, 1)
+ srcSizes := getListViewBufferValues[int64](in, 2)
+ isValidAndNonEmpty := func(i int) bool {
+ return (bitmap == nil || bitutil.BitIsSet(bitmap.Bytes(), inOff+i)) && srcSizes[i] > 0
+ }
+
+ dstOffsets := arrow.Int64Traits.CastFromBytes(out.Bytes())
+ for i, offset := range srcOffsets {
+ if isValidAndNonEmpty(i) {
+ // This is guaranteed by RangeOfValuesUsed returning the smallest offset
+ // of valid and non-empty list-views.
+ debug.Assert(offset+displacement >= 0, "putListViewOffsets64: offset underflow while concatenating arrays")
+ dstOffsets[outOff+i] = offset + displacement
+ } else {
+ dstOffsets[outOff+i] = 0
+ }
+ }
+}
+
+// Concatenate buffers holding list-view offsets into a single buffer of offsets
+//
+// valueRanges contains the relevant ranges of values in the child array actually
+// referenced by the views. Most commonly, these ranges will start from 0,
+// but when that is not the case, we need to adjust the displacement of offsets.
+// The concatenated child array does not contain values from the beginning
+// if they are not referenced by any view.
+func concatListViewOffsets(data []arrow.ArrayData, byteWidth int, valueRanges []rng, mem memory.Allocator) (*memory.Buffer, error) {
+ outSize := sumArraySizes(data)
+ if byteWidth == 4 && outSize > math.MaxInt32 {
+ return nil, fmt.Errorf("%w: offset overflow while concatenating arrays", arrow.ErrInvalid)
+ }
+ out := memory.NewResizableBuffer(mem)
+ out.Resize(byteWidth * outSize)
+
+ numChildValues, elementsLength := 0, 0
+ for i, arr := range data {
+ displacement := numChildValues - valueRanges[i].offset
+ if byteWidth == 4 {
+ putListViewOffsets32(arr, int32(displacement), out, elementsLength)
+ } else {
+ putListViewOffsets64(arr, int64(displacement), out, elementsLength)
+ }
+ elementsLength += arr.Len()
+ numChildValues += valueRanges[i].len
+ }
+ debug.Assert(elementsLength == outSize, "implementation error")
+
+ return out, nil
+}
+
+func zeroNullListViewSizes[T int32 | int64](data arrow.ArrayData) {
+ if data.Len() == 0 || data.Buffers()[0] == nil {
+ return
+ }
+ validity := data.Buffers()[0].Bytes()
+ sizes := getListViewBufferValues[T](data, 2)
+
+ for i := 0; i < data.Len(); i++ {
+ if !bitutil.BitIsSet(validity, data.Offset()+i) {
+ sizes[i] = 0
+ }
+ }
+}
+
+func concatListView(data []arrow.ArrayData, offsetType arrow.FixedWidthDataType, out *Data, mem memory.Allocator) (err error) {
+ // Calculate the ranges of values that each list-view array uses
+ valueRanges := make([]rng, len(data))
+ for i, input := range data {
+ offset, len := rangeOfValuesUsed(input)
+ valueRanges[i].offset = offset
+ valueRanges[i].len = len
+ }
+
+ // Gather the children ranges of each input array
+ childData := gatherChildrenRanges(data, 0, valueRanges)
+ for _, c := range childData {
+ defer c.Release()
+ }
+
+ // Concatenate the values
+ values, err := concat(childData, mem)
+ if err != nil {
+ return err
+ }
+
+ // Concatenate the offsets
+ offsetBuffer, err := concatListViewOffsets(data, offsetType.Bytes(), valueRanges, mem)
+ if err != nil {
+ return err
+ }
+
+ // Concatenate the sizes
+ sizeBuffers := gatherBuffersFixedWidthType(data, 2, offsetType)
+ sizeBuffer := concatBuffers(sizeBuffers, mem)
+
+ out.childData = []arrow.ArrayData{values}
+ out.buffers[1] = offsetBuffer
+ out.buffers[2] = sizeBuffer
+
+ // To make sure the sizes don't reference values that are not in the new
+ // concatenated values array, we zero the sizes of null list-view values.
+ if offsetType.ID() == arrow.INT32 {
+ zeroNullListViewSizes[int32](out)
+ } else {
+ zeroNullListViewSizes[int64](out)
+ }
+
+ return nil
+}
+
+// concat is the implementation for actually performing the concatenation of the arrow.ArrayData
+// objects that we can call internally for nested types.
+func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, err error) {
+ out := &Data{refCount: 1, dtype: data[0].DataType(), nulls: 0}
+ defer func() {
+ if pErr := recover(); pErr != nil {
+ switch e := pErr.(type) {
+ case error:
+ err = fmt.Errorf("arrow/concat: %w", e)
+ default:
+ err = fmt.Errorf("arrow/concat: %v", pErr)
+ }
+ }
+ if err != nil {
+ out.Release()
+ }
+ }()
+ for _, d := range data {
+ out.length += d.Len()
+ if out.nulls == UnknownNullCount || d.NullN() == UnknownNullCount {
+ out.nulls = UnknownNullCount
+ continue
+ }
+ out.nulls += d.NullN()
+ }
+
+ out.buffers = make([]*memory.Buffer, len(data[0].Buffers()))
+ if out.nulls != 0 && out.dtype.ID() != arrow.NULL {
+ bm, err := concatBitmaps(gatherBitmaps(data, 0), mem)
+ if err != nil {
+ return nil, err
+ }
+ out.buffers[0] = bm
+ }
+
+ dt := out.dtype
+ if dt.ID() == arrow.EXTENSION {
+ dt = dt.(arrow.ExtensionType).StorageType()
+ }
+
+ switch dt := dt.(type) {
+ case *arrow.NullType:
+ case *arrow.BooleanType:
+ bm, err := concatBitmaps(gatherBitmaps(data, 1), mem)
+ if err != nil {
+ return nil, err
+ }
+ out.buffers[1] = bm
+ case *arrow.DictionaryType:
+ idxType := dt.IndexType.(arrow.FixedWidthDataType)
+ // two cases: all dictionaries are the same or we need to unify them
+ dictsSame := true
+ dict0 := MakeFromData(data[0].Dictionary())
+ defer dict0.Release()
+ for _, d := range data {
+ dict := MakeFromData(d.Dictionary())
+ if !Equal(dict0, dict) {
+ dict.Release()
+ dictsSame = false
+ break
+ }
+ dict.Release()
+ }
+
+ indexBuffers := gatherBuffersFixedWidthType(data, 1, idxType)
+ if dictsSame {
+ out.dictionary = dict0.Data().(*Data)
+ out.dictionary.Retain()
+ out.buffers[1] = concatBuffers(indexBuffers, mem)
+ break
+ }
+
+ indexLookup, unifiedDict, err := unifyDictionaries(mem, data, dt)
+ if err != nil {
+ return nil, err
+ }
+ defer unifiedDict.Release()
+ out.dictionary = unifiedDict.Data().(*Data)
+ out.dictionary.Retain()
+
+ out.buffers[1], err = concatDictIndices(mem, data, idxType, indexLookup)
+ if err != nil {
+ return nil, err
+ }
+ case arrow.FixedWidthDataType:
+ out.buffers[1] = concatBuffers(gatherBuffersFixedWidthType(data, 1, dt), mem)
+ case arrow.BinaryDataType:
+ offsetWidth := dt.Layout().Buffers[1].ByteWidth
+ offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem)
+ if err != nil {
+ return nil, err
+ }
+ out.buffers[1] = offsetBuffer
+ out.buffers[2] = concatBuffers(gatherBufferRanges(data, 2, valueRanges), mem)
+ case *arrow.ListType:
+ offsetWidth := dt.Layout().Buffers[1].ByteWidth
+ offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem)
+ if err != nil {
+ return nil, err
+ }
+ childData := gatherChildrenRanges(data, 0, valueRanges)
+ for _, c := range childData {
+ defer c.Release()
+ }
+
+ out.buffers[1] = offsetBuffer
+ out.childData = make([]arrow.ArrayData, 1)
+ out.childData[0], err = concat(childData, mem)
+ if err != nil {
+ return nil, err
+ }
+ case *arrow.LargeListType:
+ offsetWidth := dt.Layout().Buffers[1].ByteWidth
+ offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem)
+ if err != nil {
+ return nil, err
+ }
+ childData := gatherChildrenRanges(data, 0, valueRanges)
+ for _, c := range childData {
+ defer c.Release()
+ }
+
+ out.buffers[1] = offsetBuffer
+ out.childData = make([]arrow.ArrayData, 1)
+ out.childData[0], err = concat(childData, mem)
+ if err != nil {
+ return nil, err
+ }
+ case *arrow.ListViewType:
+ offsetType := arrow.PrimitiveTypes.Int32.(arrow.FixedWidthDataType)
+ err := concatListView(data, offsetType, out, mem)
+ if err != nil {
+ return nil, err
+ }
+ case *arrow.LargeListViewType:
+ offsetType := arrow.PrimitiveTypes.Int64.(arrow.FixedWidthDataType)
+ err := concatListView(data, offsetType, out, mem)
+ if err != nil {
+ return nil, err
+ }
+ case *arrow.FixedSizeListType:
+ childData := gatherChildrenMultiplier(data, 0, int(dt.Len()))
+ for _, c := range childData {
+ defer c.Release()
+ }
+
+ children, err := concat(childData, mem)
+ if err != nil {
+ return nil, err
+ }
+ out.childData = []arrow.ArrayData{children}
+ case *arrow.StructType:
+ out.childData = make([]arrow.ArrayData, len(dt.Fields()))
+ for i := range dt.Fields() {
+ children := gatherChildren(data, i)
+ for _, c := range children {
+ defer c.Release()
+ }
+
+ childData, err := concat(children, mem)
+ if err != nil {
+ return nil, err
+ }
+ out.childData[i] = childData
+ }
+ case *arrow.MapType:
+ offsetWidth := dt.Layout().Buffers[1].ByteWidth
+ offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem)
+ if err != nil {
+ return nil, err
+ }
+ childData := gatherChildrenRanges(data, 0, valueRanges)
+ for _, c := range childData {
+ defer c.Release()
+ }
+
+ out.buffers[1] = offsetBuffer
+ out.childData = make([]arrow.ArrayData, 1)
+ out.childData[0], err = concat(childData, mem)
+ if err != nil {
+ return nil, err
+ }
+ case *arrow.RunEndEncodedType:
+ physicalLength, overflow := int(0), false
+ // we can't use gatherChildren because the Offset and Len of
+ // data doesn't correspond to the physical length or offset
+ runs := make([]arrow.ArrayData, len(data))
+ values := make([]arrow.ArrayData, len(data))
+ for i, d := range data {
+ plen := encoded.GetPhysicalLength(d)
+ off := encoded.FindPhysicalOffset(d)
+
+ runs[i] = NewSliceData(d.Children()[0], int64(off), int64(off+plen))
+ defer runs[i].Release()
+ values[i] = NewSliceData(d.Children()[1], int64(off), int64(off+plen))
+ defer values[i].Release()
+
+ physicalLength, overflow = addOvf(physicalLength, plen)
+ if overflow {
+ return nil, fmt.Errorf("%w: run end encoded array length must fit into a 32-bit signed integer",
+ arrow.ErrInvalid)
+ }
+ }
+
+ runEndsByteWidth := runs[0].DataType().(arrow.FixedWidthDataType).Bytes()
+ runEndsBuffers := gatherFixedBuffers(runs, 1, runEndsByteWidth)
+ outRunEndsLen := physicalLength * runEndsByteWidth
+ outRunEndsBuf := memory.NewResizableBuffer(mem)
+ outRunEndsBuf.Resize(outRunEndsLen)
+ defer outRunEndsBuf.Release()
+
+ if err := updateRunEnds(runEndsByteWidth, data, runEndsBuffers, outRunEndsBuf); err != nil {
+ return nil, err
+ }
+
+ out.childData = make([]arrow.ArrayData, 2)
+ out.childData[0] = NewData(data[0].Children()[0].DataType(), int(physicalLength),
+ []*memory.Buffer{nil, outRunEndsBuf}, nil, 0, 0)
+
+ var err error
+ out.childData[1], err = concat(values, mem)
+ if err != nil {
+ out.childData[0].Release()
+ return nil, err
+ }
+
+ default:
+ return nil, fmt.Errorf("concatenate not implemented for type %s", dt)
+ }
+
+ return out, nil
+}
+
+// check overflow in the addition, taken from bits.Add but adapted for signed integers
+// rather than unsigned integers. bits.UintSize will be either 32 or 64 based on
+// whether our architecture is 32 bit or 64. The operation is the same for both cases,
+// the only difference is how much we need to shift by 30 for 32 bit and 62 for 64 bit.
+// Thus, bits.UintSize - 2 is how much we shift right by to check if we had an overflow
+// in the signed addition.
+//
+// First return is the result of the sum, the second return is true if there was an overflow
+func addOvf(x, y int) (int, bool) {
+ sum := x + y
+ return sum, ((x&y)|((x|y)&^sum))>>(bits.UintSize-2) == 1
+}
+
+// concatenate bitmaps together and return a buffer with the combined bitmaps
+func concatBitmaps(bitmaps []bitmap, mem memory.Allocator) (*memory.Buffer, error) {
+ var (
+ outlen int
+ overflow bool
+ )
+
+ for _, bm := range bitmaps {
+ if outlen, overflow = addOvf(outlen, bm.rng.len); overflow {
+ return nil, errors.New("length overflow when concatenating arrays")
+ }
+ }
+
+ out := memory.NewResizableBuffer(mem)
+ out.Resize(int(bitutil.BytesForBits(int64(outlen))))
+ dst := out.Bytes()
+
+ offset := 0
+ for _, bm := range bitmaps {
+ if bm.data == nil { // if the bitmap is nil, that implies that the value is true for all elements
+ bitutil.SetBitsTo(out.Bytes(), int64(offset), int64(bm.rng.len), true)
+ } else {
+ bitutil.CopyBitmap(bm.data, bm.rng.offset, bm.rng.len, dst, offset)
+ }
+ offset += bm.rng.len
+ }
+ return out, nil
+}
+
+func updateRunEnds(byteWidth int, inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, outputBuffer *memory.Buffer) error {
+ switch byteWidth {
+ case 2:
+ out := arrow.Int16Traits.CastFromBytes(outputBuffer.Bytes())
+ return updateRunsInt16(inputData, inputBuffers, out)
+ case 4:
+ out := arrow.Int32Traits.CastFromBytes(outputBuffer.Bytes())
+ return updateRunsInt32(inputData, inputBuffers, out)
+ case 8:
+ out := arrow.Int64Traits.CastFromBytes(outputBuffer.Bytes())
+ return updateRunsInt64(inputData, inputBuffers, out)
+ }
+ return fmt.Errorf("%w: invalid dataType for RLE runEnds", arrow.ErrInvalid)
+}
+
+func updateRunsInt16(inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, output []int16) error {
+ // for now we will not attempt to optimize by checking if we
+ // can fold the end and beginning of each array we're concatenating
+ // into a single run
+ pos := 0
+ for i, buf := range inputBuffers {
+ if buf.Len() == 0 {
+ continue
+ }
+ src := arrow.Int16Traits.CastFromBytes(buf.Bytes())
+ if pos == 0 {
+ pos += copy(output, src)
+ continue
+ }
+
+ lastEnd := output[pos-1]
+ // we can check the last runEnd in the src and add it to the
+ // last value that we're adjusting them all by to see if we
+ // are going to overflow
+ if int64(lastEnd)+int64(int(src[len(src)-1])-inputData[i].Offset()) > math.MaxInt16 {
+ return fmt.Errorf("%w: overflow in run-length-encoded run ends concat", arrow.ErrInvalid)
+ }
+
+ // adjust all of the run ends by first normalizing them (e - data[i].offset)
+ // then adding the previous value we ended on. Since the offset
+ // is a logical length offset it should be accurate to just subtract
+ // it from each value.
+ for j, e := range src {
+ output[pos+j] = lastEnd + int16(int(e)-inputData[i].Offset())
+ }
+ pos += len(src)
+ }
+ return nil
+}
+
+func updateRunsInt32(inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, output []int32) error {
+ // for now we will not attempt to optimize by checking if we
+ // can fold the end and beginning of each array we're concatenating
+ // into a single run
+ pos := 0
+ for i, buf := range inputBuffers {
+ if buf.Len() == 0 {
+ continue
+ }
+ src := arrow.Int32Traits.CastFromBytes(buf.Bytes())
+ if pos == 0 {
+ pos += copy(output, src)
+ continue
+ }
+
+ lastEnd := output[pos-1]
+ // we can check the last runEnd in the src and add it to the
+ // last value that we're adjusting them all by to see if we
+ // are going to overflow
+ if int64(lastEnd)+int64(int(src[len(src)-1])-inputData[i].Offset()) > math.MaxInt32 {
+ return fmt.Errorf("%w: overflow in run-length-encoded run ends concat", arrow.ErrInvalid)
+ }
+
+ // adjust all of the run ends by first normalizing them (e - data[i].offset)
+ // then adding the previous value we ended on. Since the offset
+ // is a logical length offset it should be accurate to just subtract
+ // it from each value.
+ for j, e := range src {
+ output[pos+j] = lastEnd + int32(int(e)-inputData[i].Offset())
+ }
+ pos += len(src)
+ }
+ return nil
+}
+
+func updateRunsInt64(inputData []arrow.ArrayData, inputBuffers []*memory.Buffer, output []int64) error {
+ // for now we will not attempt to optimize by checking if we
+ // can fold the end and beginning of each array we're concatenating
+ // into a single run
+ pos := 0
+ for i, buf := range inputBuffers {
+ if buf.Len() == 0 {
+ continue
+ }
+ src := arrow.Int64Traits.CastFromBytes(buf.Bytes())
+ if pos == 0 {
+ pos += copy(output, src)
+ continue
+ }
+
+ lastEnd := output[pos-1]
+ // we can check the last runEnd in the src and add it to the
+ // last value that we're adjusting them all by to see if we
+ // are going to overflow
+ if uint64(lastEnd)+uint64(int(src[len(src)-1])-inputData[i].Offset()) > math.MaxInt64 {
+ return fmt.Errorf("%w: overflow in run-length-encoded run ends concat", arrow.ErrInvalid)
+ }
+
+ // adjust all of the run ends by first normalizing them (e - data[i].offset)
+ // then adding the previous value we ended on. Since the offset
+ // is a logical length offset it should be accurate to just subtract
+ // it from each value.
+ for j, e := range src {
+ output[pos+j] = lastEnd + e - int64(inputData[i].Offset())
+ }
+ pos += len(src)
+ }
+ return nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go
new file mode 100644
index 000000000..49df06fb1
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go
@@ -0,0 +1,250 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "hash/maphash"
+ "math/bits"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
// Data represents the memory and metadata of an Arrow array.
type Data struct {
	// refCount is manipulated atomically (Retain/Release); resources are
	// freed when it reaches zero.
	refCount int64
	dtype    arrow.DataType
	// nulls is the known null count; it may be UnknownNullCount when a
	// slice's null count has not been computed (see NewSliceData).
	nulls  int
	offset int // logical element offset into the buffers
	length int // logical number of elements

	// for dictionary arrays: buffers will be the null validity bitmap and the indexes that reference
	// values in the dictionary member. childData would be empty in a dictionary array
	buffers    []*memory.Buffer  // TODO(sgc): should this be an interface?
	childData  []arrow.ArrayData // TODO(sgc): managed by ListArray, StructArray and UnionArray types
	dictionary *Data             // only populated for dictionary arrays
}
+
+// NewData creates a new Data.
+func NewData(dtype arrow.DataType, length int, buffers []*memory.Buffer, childData []arrow.ArrayData, nulls, offset int) *Data {
+ for _, b := range buffers {
+ if b != nil {
+ b.Retain()
+ }
+ }
+
+ for _, child := range childData {
+ if child != nil {
+ child.Retain()
+ }
+ }
+
+ return &Data{
+ refCount: 1,
+ dtype: dtype,
+ nulls: nulls,
+ length: length,
+ offset: offset,
+ buffers: buffers,
+ childData: childData,
+ }
+}
+
+// NewDataWithDictionary creates a new data object, but also sets the provided dictionary into the data if it's not nil
+func NewDataWithDictionary(dtype arrow.DataType, length int, buffers []*memory.Buffer, nulls, offset int, dict *Data) *Data {
+ data := NewData(dtype, length, buffers, nil, nulls, offset)
+ if dict != nil {
+ dict.Retain()
+ }
+ data.dictionary = dict
+ return data
+}
+
+func (d *Data) Copy() *Data {
+ // don't pass the slices directly, otherwise it retains the connection
+ // we need to make new slices and populate them with the same pointers
+ bufs := make([]*memory.Buffer, len(d.buffers))
+ copy(bufs, d.buffers)
+ children := make([]arrow.ArrayData, len(d.childData))
+ copy(children, d.childData)
+
+ data := NewData(d.dtype, d.length, bufs, children, d.nulls, d.offset)
+ data.SetDictionary(d.dictionary)
+ return data
+}
+
// Reset sets the Data for re-use.
//
// NOTE(review): the dictionary member is left untouched by Reset — confirm
// whether callers expect it to be cleared as well.
func (d *Data) Reset(dtype arrow.DataType, length int, buffers []*memory.Buffer, childData []arrow.ArrayData, nulls, offset int) {
	// Retain new buffers before releasing existing buffers in-case they're the same ones to prevent accidental premature
	// release.
	for _, b := range buffers {
		if b != nil {
			b.Retain()
		}
	}
	for _, b := range d.buffers {
		if b != nil {
			b.Release()
		}
	}
	d.buffers = buffers

	// Retain new children data before releasing existing children data in-case they're the same ones to prevent accidental
	// premature release.
	for _, d := range childData {
		if d != nil {
			d.Retain()
		}
	}
	for _, d := range d.childData {
		if d != nil {
			d.Release()
		}
	}
	d.childData = childData

	d.dtype = dtype
	d.length = length
	d.nulls = nulls
	d.offset = offset
}
+
// Retain increases the reference count by 1.
// Retain may be called simultaneously from multiple goroutines.
func (d *Data) Retain() {
	atomic.AddInt64(&d.refCount, 1)
}

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (d *Data) Release() {
	debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases")

	if atomic.AddInt64(&d.refCount, -1) == 0 {
		// last owner: release everything this Data co-owns
		for _, b := range d.buffers {
			if b != nil {
				b.Release()
			}
		}

		// NOTE(review): child entries are released without a nil check, unlike
		// buffers above — a nil child here would panic; confirm nil children
		// are impossible by construction.
		for _, b := range d.childData {
			b.Release()
		}

		if d.dictionary != nil {
			d.dictionary.Release()
		}
		// drop references so they can be collected even if this struct
		// itself remains reachable
		d.dictionary, d.buffers, d.childData = nil, nil, nil
	}
}
+
// DataType returns the DataType of the data.
func (d *Data) DataType() arrow.DataType { return d.dtype }

// SetNullN overrides the stored null count (callers may pass
// UnknownNullCount to mark it as not-yet-computed).
func (d *Data) SetNullN(n int) { d.nulls = n }

// NullN returns the number of nulls.
func (d *Data) NullN() int { return d.nulls }

// Len returns the length.
func (d *Data) Len() int { return d.length }

// Offset returns the offset.
func (d *Data) Offset() int { return d.offset }

// Buffers returns the buffers.
func (d *Data) Buffers() []*memory.Buffer { return d.buffers }

// Children returns the child ArrayData for nested types.
func (d *Data) Children() []arrow.ArrayData { return d.childData }

// Dictionary returns the ArrayData object for the dictionary member, or nil
func (d *Data) Dictionary() arrow.ArrayData { return d.dictionary }
+
+// SetDictionary allows replacing the dictionary for this particular Data object
+func (d *Data) SetDictionary(dict arrow.ArrayData) {
+ if d.dictionary != nil {
+ d.dictionary.Release()
+ d.dictionary = nil
+ }
+ if dict.(*Data) != nil {
+ dict.Retain()
+ d.dictionary = dict.(*Data)
+ }
+}
+
// NewSliceData returns a new slice that shares backing data with the input.
// The returned Data slice starts at i and extends j-i elements, such as:
//
//	slice := data[i:j]
//
// The returned value must be Release'd after use.
//
// NewSliceData panics if the slice is outside the valid range of the input Data.
// NewSliceData panics if j < i.
func NewSliceData(data arrow.ArrayData, i, j int64) arrow.ArrayData {
	// NOTE(review): the last clause reduces to int(i) > data.Len(), which the
	// first two checks already imply; negative i is not rejected here —
	// confirm callers never pass one.
	if j > int64(data.Len()) || i > j || data.Offset()+int(i) > data.Offset()+data.Len() {
		panic("arrow/array: index out of range")
	}

	// the slice co-owns the parent's buffers, children and dictionary
	for _, b := range data.Buffers() {
		if b != nil {
			b.Retain()
		}
	}

	for _, child := range data.Children() {
		if child != nil {
			child.Retain()
		}
	}

	if data.(*Data).dictionary != nil {
		data.(*Data).dictionary.Retain()
	}

	o := &Data{
		refCount: 1,
		dtype:    data.DataType(),
		// nulls within [i, j) cannot be derived from the parent's count
		nulls:      UnknownNullCount,
		length:     int(j - i),
		offset:     data.Offset() + int(i), // logical offsets are additive
		buffers:    data.Buffers(),
		childData:  data.Children(),
		dictionary: data.(*Data).dictionary,
	}

	// a parent with no nulls at all cannot produce a slice with nulls
	if data.NullN() == 0 {
		o.nulls = 0
	}

	return o
}
+
// Hash folds an ArrayData's identity-relevant fields into h: the length,
// the raw bytes of the first buffer (when present), and recursively every
// child's data.
func Hash(h *maphash.Hash, data arrow.ArrayData) {
	a := data.(*Data)

	h.Write((*[bits.UintSize / 8]byte)(unsafe.Pointer(&a.length))[:])
	// NOTE(review): the length is written twice; the second write looks like
	// it was meant to be a different field (e.g. offset or null count).
	// Changing it would alter all computed hashes — confirm against upstream
	// before touching.
	h.Write((*[bits.UintSize / 8]byte)(unsafe.Pointer(&a.length))[:])
	if len(a.buffers) > 0 && a.buffers[0] != nil {
		h.Write(a.buffers[0].Bytes())
	}
	for _, c := range a.childData {
		Hash(h, c)
	}
}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go
new file mode 100644
index 000000000..331753168
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go
@@ -0,0 +1,365 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "math/big"
+ "reflect"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/decimal128"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// A type which represents an immutable sequence of 128-bit decimal values.
type Decimal128 struct {
	array

	// values is the offset/length-adjusted view into the value buffer
	// (see setData); values[i] corresponds to logical element i.
	values []decimal128.Num
}

// NewDecimal128Data constructs a Decimal128 array from the given ArrayData,
// which must wrap a *Data. The returned array starts with a refcount of 1.
func NewDecimal128Data(data arrow.ArrayData) *Decimal128 {
	a := &Decimal128{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}
+
// Value returns the raw decimal at index i; it does not consult the validity
// bitmap, so the result for a null slot is whatever the buffer holds.
func (a *Decimal128) Value(i int) decimal128.Num { return a.values[i] }

// ValueStr returns the decimal at i rendered as a string, or NullValueStr
// for a null entry.
func (a *Decimal128) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}
	return a.GetOneForMarshal(i).(string)
}

// Values returns the backing decimal slice covering this array's logical range.
func (a *Decimal128) Values() []decimal128.Num { return a.values }
+
+func (a *Decimal128) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", a.Value(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// setData wires the array to its backing Data, slicing the value buffer down
// to the logical [offset, offset+length) window.
func (a *Decimal128) setData(data *Data) {
	a.array.setData(data)
	vals := data.buffers[1]
	if vals != nil {
		a.values = arrow.Decimal128Traits.CastFromBytes(vals.Bytes())
		beg := a.array.data.offset
		end := beg + a.array.data.length
		a.values = a.values[beg:end]
	}
}

// GetOneForMarshal returns nil for a null entry, otherwise the value at i as
// a decimal string: the 128-bit integer divided by 10^scale and formatted
// with 'g' at the type's precision.
func (a *Decimal128) GetOneForMarshal(i int) interface{} {
	if a.IsNull(i) {
		return nil
	}

	typ := a.DataType().(*arrow.Decimal128Type)
	f := (&big.Float{}).SetInt(a.Value(i).BigInt())
	// math.Pow10 yields a float64, so for very large scales the divisor is an
	// approximation — NOTE(review): confirm the precision here is acceptable.
	f.Quo(f, big.NewFloat(math.Pow10(int(typ.Scale))))
	return f.Text('g', int(typ.Precision))
}
+
// MarshalJSON renders the array as a JSON list such as ["1.23", null, ...].
func (a *Decimal128) MarshalJSON() ([]byte, error) {
	vals := make([]interface{}, a.Len())
	for i := 0; i < a.Len(); i++ {
		vals[i] = a.GetOneForMarshal(i) // string for valid slots, nil for nulls
	}
	return json.Marshal(vals)
}

// arrayEqualDecimal128 reports whether the valid values of left and right are
// equal position by position. Null slots on the left are skipped; the caller
// is expected to have already verified equal lengths and matching null bitmaps.
func arrayEqualDecimal128(left, right *Decimal128) bool {
	for i := 0; i < left.Len(); i++ {
		if left.IsNull(i) {
			continue
		}
		if left.Value(i) != right.Value(i) {
			return false
		}
	}
	return true
}
+
// Decimal128Builder builds Decimal128 arrays incrementally.
type Decimal128Builder struct {
	builder

	dtype   *arrow.Decimal128Type // target type; precision/scale used when parsing input
	data    *memory.Buffer        // backing storage for rawData
	rawData []decimal128.Num      // typed view over data's bytes
}

// NewDecimal128Builder creates a builder for the given decimal type, using
// mem for allocations. The builder starts with a refcount of 1.
func NewDecimal128Builder(mem memory.Allocator, dtype *arrow.Decimal128Type) *Decimal128Builder {
	return &Decimal128Builder{
		builder: builder{refCount: 1, mem: mem},
		dtype:   dtype,
	}
}
+
// Type returns the decimal type this builder produces.
func (b *Decimal128Builder) Type() arrow.DataType { return b.dtype }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Decimal128Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			// rawData aliases b.data's bytes; invalidate them together
			b.rawData = nil
		}
	}
}
+
// Append appends a valid value, growing the builder as needed.
func (b *Decimal128Builder) Append(v decimal128.Num) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// UnsafeAppend appends v without growth checks; the caller must have
// Reserve'd capacity beforehand.
func (b *Decimal128Builder) UnsafeAppend(v decimal128.Num) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// AppendNull appends a single null entry.
func (b *Decimal128Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls appends n null entries.
func (b *Decimal128Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue appends a valid zero value.
func (b *Decimal128Builder) AppendEmptyValue() {
	b.Append(decimal128.Num{})
}

// AppendEmptyValues appends n valid zero values.
func (b *Decimal128Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// reserving space; for nulls the value slot itself is left untouched.
func (b *Decimal128Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Decimal128Builder) AppendValues(v []decimal128.Num, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ if len(v) > 0 {
+ arrow.Decimal128Traits.Copy(b.rawData[b.length:], v)
+ }
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
// init allocates the null bitmap and value buffer for capacity elements.
func (b *Decimal128Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Decimal128Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Decimal128Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
func (b *Decimal128Builder) Resize(n int) {
	nBuilder := n
	// never allocate below the minimum builder capacity
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		// the base builder tracks the originally requested (unclamped) size
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Decimal128Traits.BytesRequired(n))
		b.rawData = arrow.Decimal128Traits.CastFromBytes(b.data.Bytes())
	}
}
+
// NewArray creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder
// so it can be used to build a new array.
func (b *Decimal128Builder) NewArray() arrow.Array {
	return b.NewDecimal128Array()
}

// NewDecimal128Array creates a Decimal128 array from the memory buffers used by the builder and resets the Decimal128Builder
// so it can be used to build a new array.
func (b *Decimal128Builder) NewDecimal128Array() (a *Decimal128) {
	data := b.newData()
	a = NewDecimal128Data(data)
	data.Release() // the array now holds its own reference
	return
}

// newData snapshots the builder's buffers into a new Data (which takes its
// own references) and resets the builder for reuse.
func (b *Decimal128Builder) newData() (data *Data) {
	bytesRequired := arrow.Decimal128Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	// drop the builder's own references; data retained them above
	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
+func (b *Decimal128Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ val, err := decimal128.FromString(s, b.dtype.Precision, b.dtype.Scale)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(val)
+ return nil
+}
+
// UnmarshalOne decodes a single JSON token from dec and appends it: numbers
// and strings are parsed at the builder's precision/scale, null appends a
// null, and any other token yields a *json.UnmarshalTypeError.
func (b *Decimal128Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case float64:
		val, err := decimal128.FromFloat64(v, b.dtype.Precision, b.dtype.Scale)
		if err != nil {
			return err
		}
		b.Append(val)
	case string:
		val, err := decimal128.FromString(v, b.dtype.Precision, b.dtype.Scale)
		if err != nil {
			return err
		}
		b.Append(val)
	case json.Number:
		// produced when the decoder is configured with UseNumber
		val, err := decimal128.FromString(v.String(), b.dtype.Precision, b.dtype.Scale)
		if err != nil {
			return err
		}
		b.Append(val)
	case nil:
		b.AppendNull()
		return nil
	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(decimal128.Num{}),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal appends values decoded from dec until the enclosing JSON array ends.
func (b *Decimal128Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+// UnmarshalJSON will add the unmarshalled values to this builder.
+//
+// If the values are strings, they will get parsed with big.ParseFloat using
+// a rounding mode of big.ToNearestAway currently.
+func (b *Decimal128Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("decimal128 builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+var (
+ _ arrow.Array = (*Decimal128)(nil)
+ _ Builder = (*Decimal128Builder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go
new file mode 100644
index 000000000..d63544f78
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go
@@ -0,0 +1,364 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "math/big"
+ "reflect"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/decimal256"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// Decimal256 is a type that represents an immutable sequence of 256-bit decimal values.
type Decimal256 struct {
	array

	// values is the offset/length-adjusted view into the value buffer
	// (see setData); values[i] corresponds to logical element i.
	values []decimal256.Num
}

// NewDecimal256Data constructs a Decimal256 array from the given ArrayData,
// which must wrap a *Data. The returned array starts with a refcount of 1.
func NewDecimal256Data(data arrow.ArrayData) *Decimal256 {
	a := &Decimal256{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}
+
// Value returns the raw decimal at index i; it does not consult the validity
// bitmap, so the result for a null slot is whatever the buffer holds.
func (a *Decimal256) Value(i int) decimal256.Num { return a.values[i] }

// ValueStr returns the decimal at i rendered as a string, or NullValueStr
// for a null entry.
func (a *Decimal256) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}
	return a.GetOneForMarshal(i).(string)
}

// Values returns the backing decimal slice covering this array's logical range.
func (a *Decimal256) Values() []decimal256.Num { return a.values }
+
+func (a *Decimal256) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", a.Value(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+func (a *Decimal256) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Decimal256Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+func (a *Decimal256) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ typ := a.DataType().(*arrow.Decimal256Type)
+ f := (&big.Float{}).SetInt(a.Value(i).BigInt())
+ f.Quo(f, big.NewFloat(math.Pow10(int(typ.Scale))))
+ return f.Text('g', int(typ.Precision))
+}
+
+func (a *Decimal256) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+ return json.Marshal(vals)
+}
+
+func arrayEqualDecimal256(left, right *Decimal256) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+type Decimal256Builder struct {
+ builder
+
+ dtype *arrow.Decimal256Type
+ data *memory.Buffer
+ rawData []decimal256.Num
+}
+
+func NewDecimal256Builder(mem memory.Allocator, dtype *arrow.Decimal256Type) *Decimal256Builder {
+ return &Decimal256Builder{
+ builder: builder{refCount: 1, mem: mem},
+ dtype: dtype,
+ }
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Decimal256Builder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+func (b *Decimal256Builder) Append(v decimal256.Num) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *Decimal256Builder) UnsafeAppend(v decimal256.Num) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *Decimal256Builder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Decimal256Builder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *Decimal256Builder) AppendEmptyValue() {
+ b.Append(decimal256.Num{})
+}
+
+func (b *Decimal256Builder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *Decimal256Builder) Type() arrow.DataType { return b.dtype }
+
+func (b *Decimal256Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Decimal256Builder) AppendValues(v []decimal256.Num, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("arrow/array: len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ if len(v) > 0 {
+ arrow.Decimal256Traits.Copy(b.rawData[b.length:], v)
+ }
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Decimal256Builder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.Decimal256Traits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.Decimal256Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Decimal256Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Decimal256Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.Decimal256Traits.BytesRequired(n))
+ b.rawData = arrow.Decimal256Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+// NewArray creates a Decimal256 array from the memory buffers used by the builder and resets the Decimal256Builder
+// so it can be used to build a new array.
+func (b *Decimal256Builder) NewArray() arrow.Array {
+ return b.NewDecimal256Array()
+}
+
+// NewDecimal256Array creates a Decimal256 array from the memory buffers used by the builder and resets the Decimal256Builder
+// so it can be used to build a new array.
+func (b *Decimal256Builder) NewDecimal256Array() (a *Decimal256) {
+ data := b.newData()
+ a = NewDecimal256Data(data)
+ data.Release()
+ return
+}
+
+func (b *Decimal256Builder) newData() (data *Data) {
+ bytesRequired := arrow.Decimal256Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *Decimal256Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ val, err := decimal256.FromString(s, b.dtype.Precision, b.dtype.Scale)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(val)
+ return nil
+}
+
+func (b *Decimal256Builder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case float64:
+ val, err := decimal256.FromFloat64(v, b.dtype.Precision, b.dtype.Scale)
+ if err != nil {
+ return err
+ }
+ b.Append(val)
+ case string:
+ out, err := decimal256.FromString(v, b.dtype.Precision, b.dtype.Scale)
+ if err != nil {
+ return err
+ }
+ b.Append(out)
+ case json.Number:
+ out, err := decimal256.FromString(v.String(), b.dtype.Precision, b.dtype.Scale)
+ if err != nil {
+ return err
+ }
+ b.Append(out)
+ case nil:
+ b.AppendNull()
+ return nil
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf(decimal256.Num{}),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ return nil
+}
+
+func (b *Decimal256Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// UnmarshalJSON will add the unmarshalled values to this builder.
//
// Values must arrive as a JSON array; string and numeric elements are parsed
// via decimal256.FromString / decimal256.FromFloat64 at the builder's
// precision and scale (see UnmarshalOne).
func (b *Decimal256Builder) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	t, err := dec.Token()
	if err != nil {
		return err
	}

	// the payload must open with '[' — anything else is rejected up front
	if delim, ok := t.(json.Delim); !ok || delim != '[' {
		return fmt.Errorf("arrow/array: decimal256 builder must unpack from json array, found %s", delim)
	}

	return b.Unmarshal(dec)
}
+
+var (
+ _ arrow.Array = (*Decimal256)(nil)
+ _ Builder = (*Decimal256Builder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go
new file mode 100644
index 000000000..d0a1c4dc9
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go
@@ -0,0 +1,1953 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "math/bits"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/decimal128"
+ "github.com/apache/arrow/go/v14/arrow/decimal256"
+ "github.com/apache/arrow/go/v14/arrow/float16"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/hashing"
+ "github.com/apache/arrow/go/v14/internal/json"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+// Dictionary represents the type for dictionary-encoded data with a data
+// dependent dictionary.
+//
+// A dictionary array contains an array of non-negative integers (the "dictionary"
+// indices") along with a data type containing a "dictionary" corresponding to
+// the distinct values represented in the data.
+//
+// For example, the array:
+//
+//	["foo", "bar", "foo", "bar", "foo", "bar"]
+//
+// with dictionary ["bar", "foo"], would have the representation of:
+//
+//	indices: [1, 0, 1, 0, 1, 0]
+//	dictionary: ["bar", "foo"]
+//
+// The indices in principle may be any integer type.
+type Dictionary struct {
+	array
+
+	// indices is the typed array view over this array's index buffer.
+	indices arrow.Array
+	// dict is lazily constructed from the underlying data's dictionary
+	// on the first call to Dictionary().
+	dict arrow.Array
+}
+
+// NewDictionaryArray constructs a dictionary array with the provided indices
+// and dictionary using the given type.
+func NewDictionaryArray(typ arrow.DataType, indices, dict arrow.Array) *Dictionary {
+	a := &Dictionary{}
+	a.array.refCount = 1
+	// reuse the index array's buffers/children directly; only the data type
+	// and attached dictionary differ.
+	dictdata := NewData(typ, indices.Len(), indices.Data().Buffers(), indices.Data().Children(), indices.NullN(), indices.Data().Offset())
+	dictdata.dictionary = dict.Data().(*Data)
+	dict.Data().Retain()
+
+	// setData retains dictdata; release our local reference on return.
+	defer dictdata.Release()
+	a.setData(dictdata)
+	return a
+}
+
+// checkIndexBounds returns an error if any value in the provided integer
+// arraydata is >= the passed upperlimit or < 0. otherwise nil
+func checkIndexBounds(indices *Data, upperlimit uint64) error {
+	if indices.length == 0 {
+		return nil
+	}
+
+	var maxval uint64
+	switch indices.dtype.ID() {
+	case arrow.UINT8:
+		maxval = math.MaxUint8
+	case arrow.UINT16:
+		maxval = math.MaxUint16
+	case arrow.UINT32:
+		maxval = math.MaxUint32
+	case arrow.UINT64:
+		maxval = math.MaxUint64
+	}
+	// for unsigned integers, if the values array is larger than the maximum
+	// index value (especially for UINT8/UINT16), then there's no need to
+	// boundscheck. for signed integers we still need to bounds check
+	// because a value could be < 0.
+	isSigned := maxval == 0
+	if !isSigned && upperlimit > maxval {
+		return nil
+	}
+
+	start := indices.offset
+	end := indices.offset + indices.length
+
+	// TODO(ARROW-15950): lift BitSetRunReader from parquet to utils
+	// and use it here for performance improvement.
+
+	switch indices.dtype.ID() {
+	case arrow.INT8:
+		data := arrow.Int8Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt8(data[start:end])
+		if min < 0 || max >= int8(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT8:
+		data := arrow.Uint8Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint8(data[start:end])
+		if max >= uint8(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: max: %d", max)
+		}
+	case arrow.INT16:
+		data := arrow.Int16Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt16(data[start:end])
+		if min < 0 || max >= int16(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT16:
+		data := arrow.Uint16Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint16(data[start:end])
+		if max >= uint16(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: max: %d", max)
+		}
+	case arrow.INT32:
+		data := arrow.Int32Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt32(data[start:end])
+		if min < 0 || max >= int32(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT32:
+		data := arrow.Uint32Traits.CastFromBytes(indices.buffers[1].Bytes())
+		_, max := utils.GetMinMaxUint32(data[start:end])
+		if max >= uint32(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: max: %d", max)
+		}
+	case arrow.INT64:
+		data := arrow.Int64Traits.CastFromBytes(indices.buffers[1].Bytes())
+		min, max := utils.GetMinMaxInt64(data[start:end])
+		if min < 0 || max >= int64(upperlimit) {
+			return fmt.Errorf("contains out of bounds index: min: %d, max: %d", min, max)
+		}
+	case arrow.UINT64:
+		data := arrow.Uint64Traits.CastFromBytes(indices.buffers[1].Bytes())
+		// use the precomputed start/end like the other cases above
+		_, max := utils.GetMinMaxUint64(data[start:end])
+		if max >= upperlimit {
+			return fmt.Errorf("contains out of bounds value: max: %d", max)
+		}
+	default:
+		return fmt.Errorf("invalid type for bounds checking: %T", indices.dtype)
+	}
+
+	return nil
+}
+
+// NewValidatedDictionaryArray constructs a dictionary array from the provided indices
+// and dictionary arrays, while also performing validation checks to ensure correctness
+// such as bounds checking that are usually skipped for performance.
+func NewValidatedDictionaryArray(typ *arrow.DictionaryType, indices, dict arrow.Array) (*Dictionary, error) {
+	if indices.DataType().ID() != typ.IndexType.ID() {
+		return nil, fmt.Errorf("dictionary type index (%T) does not match indices array type (%T)", typ.IndexType, indices.DataType())
+	}
+
+	if !arrow.TypeEqual(typ.ValueType, dict.DataType()) {
+		return nil, fmt.Errorf("dictionary value type (%T) does not match dict array type (%T)", typ.ValueType, dict.DataType())
+	}
+
+	// reject any index that is negative or >= len(dict)
+	if err := checkIndexBounds(indices.Data().(*Data), uint64(dict.Len())); err != nil {
+		return nil, err
+	}
+
+	return NewDictionaryArray(typ, indices, dict), nil
+}
+
+// NewDictionaryData creates a strongly typed Dictionary array from
+// an ArrayData object with a datatype of arrow.Dictionary and a dictionary
+func NewDictionaryData(data arrow.ArrayData) *Dictionary {
+	a := &Dictionary{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// Retain increases the reference count by 1; Retain may be called
+// simultaneously from multiple goroutines.
+func (d *Dictionary) Retain() {
+	atomic.AddInt64(&d.refCount, 1)
+}
+
+// Release decreases the reference count by 1; when it reaches zero the
+// underlying data, indices, and (if materialized) dictionary are released.
+func (d *Dictionary) Release() {
+	debug.Assert(atomic.LoadInt64(&d.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&d.refCount, -1) == 0 {
+		d.data.Release()
+		d.data, d.nullBitmapBytes = nil, nil
+		d.indices.Release()
+		d.indices = nil
+		// dict is only non-nil if Dictionary() was called
+		if d.dict != nil {
+			d.dict.Release()
+			d.dict = nil
+		}
+	}
+}
+
+// setData installs data into the array and constructs the typed indices
+// view from the dictionary type's index type. It panics if a non-empty
+// Data has no dictionary attached.
+func (d *Dictionary) setData(data *Data) {
+	d.array.setData(data)
+
+	dictType := data.dtype.(*arrow.DictionaryType)
+	if data.dictionary == nil {
+		// an empty dictionary array may legitimately carry no dictionary
+		if data.length > 0 {
+			panic("arrow/array: no dictionary set in Data for Dictionary array")
+		}
+	} else {
+		debug.Assert(arrow.TypeEqual(dictType.ValueType, data.dictionary.DataType()), "mismatched dictionary value types")
+	}
+
+	// the indices view shares the same buffers/children/offset as data
+	indexData := NewData(dictType.IndexType, data.length, data.buffers, data.childData, data.nulls, data.offset)
+	defer indexData.Release()
+	d.indices = MakeFromData(indexData)
+}
+
+// Dictionary returns the values array that makes up the dictionary for this
+// array. It is constructed lazily on first call and cached.
+func (d *Dictionary) Dictionary() arrow.Array {
+	if d.dict == nil {
+		d.dict = MakeFromData(d.data.dictionary)
+	}
+	return d.dict
+}
+
+// Indices returns the underlying array of indices as its own array
+func (d *Dictionary) Indices() arrow.Array {
+	return d.indices
+}
+
+// CanCompareIndices returns true if the dictionary arrays can be compared
+// without having to unify the dictionaries themselves first.
+// This means that the index types are equal too.
+func (d *Dictionary) CanCompareIndices(other *Dictionary) bool {
+	if !arrow.TypeEqual(d.indices.DataType(), other.indices.DataType()) {
+		return false
+	}
+
+	// the shorter dictionary must be a prefix of the longer one
+	minlen := int64(min(d.data.dictionary.length, other.data.dictionary.length))
+	return SliceEqual(d.Dictionary(), 0, minlen, other.Dictionary(), 0, minlen)
+}
+
+// ValueStr returns the string form of the dictionary value referenced by
+// index i, or NullValueStr if the slot is null.
+func (d *Dictionary) ValueStr(i int) string {
+	if d.IsNull(i) {
+		return NullValueStr
+	}
+	return d.Dictionary().ValueStr(d.GetValueIndex(i))
+}
+
+// String returns a human-readable representation of the dictionary and indices.
+func (d *Dictionary) String() string {
+	return fmt.Sprintf("{ dictionary: %v\n  indices: %v }", d.Dictionary(), d.Indices())
+}
+
+// GetValueIndex returns the dictionary index for the value at index i of the array.
+// The actual value can be retrieved by using d.Dictionary().(valuetype).Value(d.GetValueIndex(i))
+func (d *Dictionary) GetValueIndex(i int) int {
+	indiceData := d.data.buffers[1].Bytes()
+	// we know the value is non-negative per the spec, so
+	// we can use the unsigned value regardless.
+	switch d.indices.DataType().ID() {
+	case arrow.UINT8, arrow.INT8:
+		return int(uint8(indiceData[d.data.offset+i]))
+	case arrow.UINT16, arrow.INT16:
+		return int(arrow.Uint16Traits.CastFromBytes(indiceData)[d.data.offset+i])
+	case arrow.UINT32, arrow.INT32:
+		idx := arrow.Uint32Traits.CastFromBytes(indiceData)[d.data.offset+i]
+		// on 32-bit platforms an index > MaxInt32 would truncate in int()
+		debug.Assert(bits.UintSize == 64 || idx <= math.MaxInt32, "arrow/dictionary: truncation of index value")
+		return int(idx)
+	case arrow.UINT64, arrow.INT64:
+		idx := arrow.Uint64Traits.CastFromBytes(indiceData)[d.data.offset+i]
+		debug.Assert((bits.UintSize == 32 && idx <= math.MaxInt32) || (bits.UintSize == 64 && idx <= math.MaxInt64), "arrow/dictionary: truncation of index value")
+		return int(idx)
+	}
+	debug.Assert(false, "unreachable dictionary index")
+	return -1
+}
+
+// GetOneForMarshal returns the JSON-marshalable form of the decoded value at
+// index i, or nil when the slot is null.
+func (d *Dictionary) GetOneForMarshal(i int) interface{} {
+	if d.IsNull(i) {
+		return nil
+	}
+	vidx := d.GetValueIndex(i)
+	return d.Dictionary().GetOneForMarshal(vidx)
+}
+
+// MarshalJSON marshals the fully-decoded values (not the indices) as a JSON array.
+func (d *Dictionary) MarshalJSON() ([]byte, error) {
+	vals := make([]interface{}, d.Len())
+	for i := 0; i < d.Len(); i++ {
+		vals[i] = d.GetOneForMarshal(i)
+	}
+	return json.Marshal(vals)
+}
+
+// arrayEqualDict reports exact equality of both the dictionaries and indices.
+func arrayEqualDict(l, r *Dictionary) bool {
+	return Equal(l.Dictionary(), r.Dictionary()) && Equal(l.indices, r.indices)
+}
+
+// arrayApproxEqualDict is like arrayEqualDict but uses approximate comparison
+// controlled by opt for both the dictionaries and indices.
+func arrayApproxEqualDict(l, r *Dictionary, opt equalOption) bool {
+	return arrayApproxEqual(l.Dictionary(), r.Dictionary(), opt) && arrayApproxEqual(l.indices, r.indices, opt)
+}
+
+// helper for building the properly typed indices of the dictionary builder;
+// Append accepts a plain int and forwards it to the concrete typed builder.
+type IndexBuilder struct {
+	Builder
+	Append func(int)
+}
+
+// createIndexBuilder constructs an IndexBuilder whose Append closure casts
+// the int index to the concrete integral type dt. Returns an error for any
+// non-integral index type.
+func createIndexBuilder(mem memory.Allocator, dt arrow.FixedWidthDataType) (ret IndexBuilder, err error) {
+	ret = IndexBuilder{Builder: NewBuilder(mem, dt)}
+	switch dt.ID() {
+	case arrow.INT8:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Int8Builder).Append(int8(idx))
+		}
+	case arrow.UINT8:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Uint8Builder).Append(uint8(idx))
+		}
+	case arrow.INT16:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Int16Builder).Append(int16(idx))
+		}
+	case arrow.UINT16:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Uint16Builder).Append(uint16(idx))
+		}
+	case arrow.INT32:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Int32Builder).Append(int32(idx))
+		}
+	case arrow.UINT32:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Uint32Builder).Append(uint32(idx))
+		}
+	case arrow.INT64:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Int64Builder).Append(int64(idx))
+		}
+	case arrow.UINT64:
+		ret.Append = func(idx int) {
+			ret.Builder.(*Uint64Builder).Append(uint64(idx))
+		}
+	default:
+		debug.Assert(false, "dictionary index type must be integral")
+		err = fmt.Errorf("dictionary index type must be integral, not %s", dt)
+	}
+
+	return
+}
+
+// helper function to construct an appropriately typed memo table based on
+// the value type for the dictionary. Several logical types share a memo
+// table with their same-width physical representation (e.g. TIMESTAMP uses
+// the int64 table); variable-length and fixed-size binary-like types use a
+// binary memo table. For arrow.NULL ret is intentionally left nil.
+func createMemoTable(mem memory.Allocator, dt arrow.DataType) (ret hashing.MemoTable, err error) {
+	switch dt.ID() {
+	case arrow.INT8:
+		ret = hashing.NewInt8MemoTable(0)
+	case arrow.UINT8:
+		ret = hashing.NewUint8MemoTable(0)
+	case arrow.INT16:
+		ret = hashing.NewInt16MemoTable(0)
+	case arrow.UINT16:
+		ret = hashing.NewUint16MemoTable(0)
+	case arrow.INT32:
+		ret = hashing.NewInt32MemoTable(0)
+	case arrow.UINT32:
+		ret = hashing.NewUint32MemoTable(0)
+	case arrow.INT64:
+		ret = hashing.NewInt64MemoTable(0)
+	case arrow.UINT64:
+		ret = hashing.NewUint64MemoTable(0)
+	case arrow.DURATION, arrow.TIMESTAMP, arrow.DATE64, arrow.TIME64:
+		ret = hashing.NewInt64MemoTable(0)
+	case arrow.TIME32, arrow.DATE32, arrow.INTERVAL_MONTHS:
+		ret = hashing.NewInt32MemoTable(0)
+	case arrow.FLOAT16:
+		// float16 values are memoized via their uint16 bit pattern
+		ret = hashing.NewUint16MemoTable(0)
+	case arrow.FLOAT32:
+		ret = hashing.NewFloat32MemoTable(0)
+	case arrow.FLOAT64:
+		ret = hashing.NewFloat64MemoTable(0)
+	case arrow.BINARY, arrow.FIXED_SIZE_BINARY, arrow.DECIMAL128, arrow.DECIMAL256, arrow.INTERVAL_DAY_TIME, arrow.INTERVAL_MONTH_DAY_NANO:
+		ret = hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(mem, arrow.BinaryTypes.Binary))
+	case arrow.STRING:
+		ret = hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(mem, arrow.BinaryTypes.String))
+	case arrow.NULL:
+	default:
+		err = fmt.Errorf("unimplemented dictionary value type, %s", dt)
+	}
+
+	return
+}
+
+// DictionaryBuilder is the interface implemented by all of the typed
+// dictionary builders in this package, extending Builder with
+// dictionary-specific construction operations.
+type DictionaryBuilder interface {
+	Builder
+
+	NewDictionaryArray() *Dictionary
+	NewDelta() (indices, delta arrow.Array, err error)
+	AppendArray(arrow.Array) error
+	AppendIndices([]int, []bool)
+	ResetFull()
+}
+
+// dictionaryBuilder is the shared implementation embedded by every typed
+// dictionary builder.
+type dictionaryBuilder struct {
+	builder
+
+	dt *arrow.DictionaryType
+	// deltaOffset records the memo table size at the last NewArray /
+	// NewDictionaryArray call, used by NewDelta to emit only new values.
+	deltaOffset int
+	memoTable   hashing.MemoTable
+	idxBuilder  IndexBuilder
+}
+
+// NewDictionaryBuilderWithDict initializes a dictionary builder and inserts the values from `init` as the first
+// values in the dictionary, but does not insert them as values into the array.
+//
+// It panics if init's type does not match dt.ValueType, if the index or value
+// type is unsupported, or if inserting the initial dictionary values fails.
+func NewDictionaryBuilderWithDict(mem memory.Allocator, dt *arrow.DictionaryType, init arrow.Array) DictionaryBuilder {
+	if init != nil && !arrow.TypeEqual(dt.ValueType, init.DataType()) {
+		panic(fmt.Errorf("arrow/array: cannot initialize dictionary type %T with array of type %T", dt.ValueType, init.DataType()))
+	}
+
+	idxbldr, err := createIndexBuilder(mem, dt.IndexType.(arrow.FixedWidthDataType))
+	if err != nil {
+		panic(fmt.Errorf("arrow/array: unsupported builder for index type of %T", dt))
+	}
+
+	memo, err := createMemoTable(mem, dt.ValueType)
+	if err != nil {
+		panic(fmt.Errorf("arrow/array: unsupported builder for value type of %T", dt))
+	}
+
+	bldr := dictionaryBuilder{
+		builder:    builder{refCount: 1, mem: mem},
+		idxBuilder: idxbldr,
+		memoTable:  memo,
+		dt:         dt,
+	}
+
+	// dispatch on the value type to wrap bldr in the matching typed builder;
+	// cases with empty bodies fall out of the switch to the panic below.
+	switch dt.ValueType.ID() {
+	case arrow.NULL:
+		ret := &NullDictionaryBuilder{bldr}
+		debug.Assert(init == nil, "arrow/array: doesn't make sense to init a null dictionary")
+		return ret
+	case arrow.UINT8:
+		ret := &Uint8DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Uint8)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.INT8:
+		ret := &Int8DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Int8)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.UINT16:
+		ret := &Uint16DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Uint16)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.INT16:
+		ret := &Int16DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Int16)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.UINT32:
+		ret := &Uint32DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Uint32)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.INT32:
+		ret := &Int32DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Int32)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.UINT64:
+		ret := &Uint64DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Uint64)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.INT64:
+		ret := &Int64DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Int64)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.FLOAT16:
+		ret := &Float16DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Float16)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.FLOAT32:
+		ret := &Float32DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Float32)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.FLOAT64:
+		ret := &Float64DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Float64)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.STRING:
+		ret := &BinaryDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertStringDictValues(init.(*String)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.BINARY:
+		ret := &BinaryDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Binary)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.FIXED_SIZE_BINARY:
+		ret := &FixedSizeBinaryDictionaryBuilder{
+			bldr, dt.ValueType.(*arrow.FixedSizeBinaryType).ByteWidth,
+		}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*FixedSizeBinary)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.DATE32:
+		ret := &Date32DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Date32)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.DATE64:
+		ret := &Date64DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Date64)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.TIMESTAMP:
+		ret := &TimestampDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Timestamp)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.TIME32:
+		ret := &Time32DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Time32)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.TIME64:
+		ret := &Time64DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Time64)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.INTERVAL_MONTHS:
+		ret := &MonthIntervalDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*MonthInterval)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.INTERVAL_DAY_TIME:
+		ret := &DayTimeDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*DayTimeInterval)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.DECIMAL128:
+		ret := &Decimal128DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Decimal128)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.DECIMAL256:
+		ret := &Decimal256DictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Decimal256)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.LIST:
+	case arrow.STRUCT:
+	case arrow.SPARSE_UNION:
+	case arrow.DENSE_UNION:
+	case arrow.DICTIONARY:
+	case arrow.MAP:
+	case arrow.EXTENSION:
+	case arrow.FIXED_SIZE_LIST:
+	case arrow.DURATION:
+		ret := &DurationDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*Duration)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	case arrow.LARGE_STRING:
+	case arrow.LARGE_BINARY:
+	case arrow.LARGE_LIST:
+	case arrow.INTERVAL_MONTH_DAY_NANO:
+		ret := &MonthDayNanoDictionaryBuilder{bldr}
+		if init != nil {
+			if err = ret.InsertDictValues(init.(*MonthDayNanoInterval)); err != nil {
+				panic(err)
+			}
+		}
+		return ret
+	}
+
+	panic("arrow/array: unimplemented dictionary key type")
+}
+
+// NewDictionaryBuilder returns a dictionary builder for dt with an empty
+// initial dictionary.
+func NewDictionaryBuilder(mem memory.Allocator, dt *arrow.DictionaryType) DictionaryBuilder {
+	return NewDictionaryBuilderWithDict(mem, dt, nil)
+}
+
+// Type returns the dictionary data type this builder produces.
+func (b *dictionaryBuilder) Type() arrow.DataType { return b.dt }
+
+// Release decreases the reference count by 1; when it reaches zero the
+// index builder and, for binary memo tables, the memo table are released.
+func (b *dictionaryBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		b.idxBuilder.Release()
+		b.idxBuilder.Builder = nil
+		// only the binary memo table holds releasable resources
+		if binmemo, ok := b.memoTable.(*hashing.BinaryMemoTable); ok {
+			binmemo.Release()
+		}
+		b.memoTable = nil
+	}
+}
+
+// AppendNull appends a null slot to the index builder and updates the
+// builder's length and null counters.
+func (b *dictionaryBuilder) AppendNull() {
+	b.length += 1
+	b.nulls += 1
+	b.idxBuilder.AppendNull()
+}
+
+// AppendNulls appends n null slots.
+func (b *dictionaryBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+// AppendEmptyValue appends a zero-value (non-null) index slot.
+func (b *dictionaryBuilder) AppendEmptyValue() {
+	b.length += 1
+	b.idxBuilder.AppendEmptyValue()
+}
+
+// AppendEmptyValues appends n zero-value index slots.
+func (b *dictionaryBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// Reserve ensures capacity for at least n more index values.
+func (b *dictionaryBuilder) Reserve(n int) {
+	b.idxBuilder.Reserve(n)
+}
+
+// Resize resizes the index builder and syncs the builder's length to it.
+func (b *dictionaryBuilder) Resize(n int) {
+	b.idxBuilder.Resize(n)
+	b.length = b.idxBuilder.Len()
+}
+
+// ResetFull clears the builder state including the dictionary / memo table
+// (unlike the implicit reset done by NewArray, which keeps the memo table).
+func (b *dictionaryBuilder) ResetFull() {
+	b.builder.reset()
+	// NewArray drains the index builder; release the result immediately
+	b.idxBuilder.NewArray().Release()
+	b.memoTable.Reset()
+}
+
+// Cap returns the capacity of the index builder.
+func (b *dictionaryBuilder) Cap() int { return b.idxBuilder.Cap() }
+
+// IsNull reports whether index slot i is null.
+func (b *dictionaryBuilder) IsNull(i int) bool { return b.idxBuilder.IsNull(i) }
+
+// UnmarshalJSON appends the values decoded from a JSON array in data to the
+// builder, dictionary-encoding them as they are appended.
+func (b *dictionaryBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	// the input must be a JSON array of values
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("dictionary builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// Unmarshal decodes all remaining values from dec using a temporary builder
+// of the dictionary's value type, then appends the resulting array so the
+// values are dictionary-encoded.
+func (b *dictionaryBuilder) Unmarshal(dec *json.Decoder) error {
+	bldr := NewBuilder(b.mem, b.dt.ValueType)
+	defer bldr.Release()
+
+	if err := bldr.Unmarshal(dec); err != nil {
+		return err
+	}
+
+	arr := bldr.NewArray()
+	defer arr.Release()
+	return b.AppendArray(arr)
+}
+
+// AppendValueFromString parses s with a temporary value-type builder and
+// appends the resulting single-element array, dictionary-encoding the value.
+func (b *dictionaryBuilder) AppendValueFromString(s string) error {
+	bldr := NewBuilder(b.mem, b.dt.ValueType)
+	defer bldr.Release()
+
+	if err := bldr.AppendValueFromString(s); err != nil {
+		return err
+	}
+
+	arr := bldr.NewArray()
+	defer arr.Release()
+	return b.AppendArray(arr)
+}
+
+// UnmarshalOne decodes a single JSON value via a temporary value-type
+// builder and appends it, dictionary-encoding the value.
+func (b *dictionaryBuilder) UnmarshalOne(dec *json.Decoder) error {
+	bldr := NewBuilder(b.mem, b.dt.ValueType)
+	defer bldr.Release()
+
+	if err := bldr.UnmarshalOne(dec); err != nil {
+		return err
+	}
+
+	arr := bldr.NewArray()
+	defer arr.Release()
+	return b.AppendArray(arr)
+}
+
+// NewArray builds and returns the dictionary array, resetting the builder's
+// index state (the memo table / dictionary is retained).
+func (b *dictionaryBuilder) NewArray() arrow.Array {
+	return b.NewDictionaryArray()
+}
+
+// newData assembles the Data for the full dictionary array: the index data
+// with the dictionary type and the accumulated dictionary attached.
+func (b *dictionaryBuilder) newData() *Data {
+	indices, dict, err := b.newWithDictOffset(0)
+	if err != nil {
+		panic(err)
+	}
+
+	indices.dtype = b.dt
+	indices.dictionary = dict
+	return indices
+}
+
+// NewDictionaryArray builds and returns the dictionary array, resetting the
+// builder's index state.
+func (b *dictionaryBuilder) NewDictionaryArray() *Dictionary {
+	a := &Dictionary{}
+	a.refCount = 1
+
+	indices := b.newData()
+	a.setData(indices)
+	indices.Release()
+	return a
+}
+
+// newWithDictOffset drains the index builder and materializes the dictionary
+// values from the memo table starting at offset (0 for the full dictionary,
+// b.deltaOffset for a delta). It records the memo table size so subsequent
+// deltas resume from here, and resets the builder's counters.
+func (b *dictionaryBuilder) newWithDictOffset(offset int) (indices, dict *Data, err error) {
+	idxarr := b.idxBuilder.NewArray()
+	defer idxarr.Release()
+
+	indices = idxarr.Data().(*Data)
+
+	b.deltaOffset = b.memoTable.Size()
+	dict, err = GetDictArrayData(b.mem, b.dt.ValueType, b.memoTable, offset)
+	b.reset()
+	// retain because idxarr (and its Data reference) is released on return
+	indices.Retain()
+	return
+}
+
+// NewDelta returns the dictionary indices and a delta dictionary since the
+// last time NewArray or NewDictionaryArray were called, and resets the state
+// of the builder (except for the dictionary / memotable)
+func (b *dictionaryBuilder) NewDelta() (indices, delta arrow.Array, err error) {
+	indicesData, deltaData, err := b.newWithDictOffset(b.deltaOffset)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer indicesData.Release()
+	defer deltaData.Release()
+	indices, delta = MakeFromData(indicesData), MakeFromData(deltaData)
+	return
+}
+
+// insertDictValue adds val to the dictionary (memo table) without appending
+// an index — used to pre-seed the dictionary.
+func (b *dictionaryBuilder) insertDictValue(val interface{}) error {
+	_, _, err := b.memoTable.GetOrInsert(val)
+	return err
+}
+
+// insertDictBytes is insertDictValue for raw byte values.
+func (b *dictionaryBuilder) insertDictBytes(val []byte) error {
+	_, _, err := b.memoTable.GetOrInsertBytes(val)
+	return err
+}
+
+// appendValue dictionary-encodes val: looks it up (or inserts it) in the
+// memo table and appends the resulting index.
+func (b *dictionaryBuilder) appendValue(val interface{}) error {
+	idx, _, err := b.memoTable.GetOrInsert(val)
+	b.idxBuilder.Append(idx)
+	b.length += 1
+	return err
+}
+
+// appendBytes is appendValue for raw byte values.
+func (b *dictionaryBuilder) appendBytes(val []byte) error {
+	idx, _, err := b.memoTable.GetOrInsertBytes(val)
+	b.idxBuilder.Append(idx)
+	b.length += 1
+	return err
+}
+
+// getvalFn returns a closure that extracts the value at index i of arr in
+// the representation expected by the memo table: primitives as themselves,
+// temporal types widened to their int32/int64 storage, float16 as its uint16
+// bits, and fixed-size struct-like values (decimals, intervals) reinterpreted
+// as raw byte slices via unsafe. Panics on unsupported array types.
+func getvalFn(arr arrow.Array) func(i int) interface{} {
+	switch typedarr := arr.(type) {
+	case *Int8:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Uint8:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Int16:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Uint16:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Int32:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Uint32:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Int64:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Uint64:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Float16:
+		return func(i int) interface{} { return typedarr.Value(i).Uint16() }
+	case *Float32:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Float64:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Duration:
+		return func(i int) interface{} { return int64(typedarr.Value(i)) }
+	case *Timestamp:
+		return func(i int) interface{} { return int64(typedarr.Value(i)) }
+	case *Date64:
+		return func(i int) interface{} { return int64(typedarr.Value(i)) }
+	case *Time64:
+		return func(i int) interface{} { return int64(typedarr.Value(i)) }
+	case *Time32:
+		return func(i int) interface{} { return int32(typedarr.Value(i)) }
+	case *Date32:
+		return func(i int) interface{} { return int32(typedarr.Value(i)) }
+	case *MonthInterval:
+		return func(i int) interface{} { return int32(typedarr.Value(i)) }
+	case *Binary:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *FixedSizeBinary:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *String:
+		return func(i int) interface{} { return typedarr.Value(i) }
+	case *Decimal128:
+		return func(i int) interface{} {
+			val := typedarr.Value(i)
+			return (*(*[arrow.Decimal128SizeBytes]byte)(unsafe.Pointer(&val)))[:]
+		}
+	case *Decimal256:
+		return func(i int) interface{} {
+			val := typedarr.Value(i)
+			return (*(*[arrow.Decimal256SizeBytes]byte)(unsafe.Pointer(&val)))[:]
+		}
+	case *DayTimeInterval:
+		return func(i int) interface{} {
+			val := typedarr.Value(i)
+			return (*(*[arrow.DayTimeIntervalSizeBytes]byte)(unsafe.Pointer(&val)))[:]
+		}
+	case *MonthDayNanoInterval:
+		return func(i int) interface{} {
+			val := typedarr.Value(i)
+			return (*(*[arrow.MonthDayNanoIntervalSizeBytes]byte)(unsafe.Pointer(&val)))[:]
+		}
+	}
+
+	panic("arrow/array: invalid dictionary value type")
+}
+
+// AppendArray dictionary-encodes and appends every element of arr; null
+// slots in arr are appended as nulls. arr's type must match the builder's
+// value type.
+func (b *dictionaryBuilder) AppendArray(arr arrow.Array) error {
+	debug.Assert(arrow.TypeEqual(b.dt.ValueType, arr.DataType()), "wrong value type of array to append to dict")
+
+	valfn := getvalFn(arr)
+	for i := 0; i < arr.Len(); i++ {
+		if arr.IsNull(i) {
+			b.AppendNull()
+		} else {
+			if err := b.appendValue(valfn(i)); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// IndexBuilder exposes the underlying index builder.
+func (b *dictionaryBuilder) IndexBuilder() IndexBuilder {
+	return b.idxBuilder
+}
+
+// AppendIndices appends raw dictionary indices directly, bypassing the memo
+// table; valid marks which slots are non-null (nil means all valid). Indices
+// are converted with plain casts, so callers must ensure each value fits the
+// builder's index type.
+//
+// NOTE(review): unlike AppendNull, invalid entries here do not increment
+// b.nulls — confirm whether the null count is reconciled elsewhere.
+func (b *dictionaryBuilder) AppendIndices(indices []int, valid []bool) {
+	b.length += len(indices)
+	switch idxbldr := b.idxBuilder.Builder.(type) {
+	case *Int8Builder:
+		vals := make([]int8, len(indices))
+		for i, v := range indices {
+			vals[i] = int8(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Int16Builder:
+		vals := make([]int16, len(indices))
+		for i, v := range indices {
+			vals[i] = int16(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Int32Builder:
+		vals := make([]int32, len(indices))
+		for i, v := range indices {
+			vals[i] = int32(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Int64Builder:
+		vals := make([]int64, len(indices))
+		for i, v := range indices {
+			vals[i] = int64(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Uint8Builder:
+		vals := make([]uint8, len(indices))
+		for i, v := range indices {
+			vals[i] = uint8(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Uint16Builder:
+		vals := make([]uint16, len(indices))
+		for i, v := range indices {
+			vals[i] = uint16(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Uint32Builder:
+		vals := make([]uint32, len(indices))
+		for i, v := range indices {
+			vals[i] = uint32(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	case *Uint64Builder:
+		vals := make([]uint64, len(indices))
+		for i, v := range indices {
+			vals[i] = uint64(v)
+		}
+		idxbldr.AppendValues(vals, valid)
+	}
+}
+
+// NullDictionaryBuilder is a dictionary builder whose value type is NULL;
+// it carries no memo table and its dictionary is always an empty Null array.
+type NullDictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+// NewArray builds and returns the (all-null) dictionary array.
+func (b *NullDictionaryBuilder) NewArray() arrow.Array {
+	return b.NewDictionaryArray()
+}
+
+// NewDictionaryArray builds the dictionary array with an empty Null array
+// attached as the dictionary.
+func (b *NullDictionaryBuilder) NewDictionaryArray() *Dictionary {
+	idxarr := b.idxBuilder.NewArray()
+	defer idxarr.Release()
+
+	out := idxarr.Data().(*Data)
+	dictarr := NewNull(0)
+	defer dictarr.Release()
+
+	// retain the null dictionary data since dictarr is released above
+	dictarr.data.Retain()
+	out.dtype = b.dt
+	out.dictionary = dictarr.data
+
+	return NewDictionaryData(out)
+}
+
+// AppendArray appends arr.Len() nulls; arr must itself be a Null array.
+func (b *NullDictionaryBuilder) AppendArray(arr arrow.Array) error {
+	if arr.DataType().ID() != arrow.NULL {
+		return fmt.Errorf("cannot append non-null array to null dictionary")
+	}
+
+	for i := 0; i < arr.(*Null).Len(); i++ {
+		b.AppendNull()
+	}
+	return nil
+}
+
+// Int8DictionaryBuilder is a dictionary builder for int8 values.
+type Int8DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+// Append dictionary-encodes v and appends its index.
+func (b *Int8DictionaryBuilder) Append(v int8) error { return b.appendValue(v) }
+
+// InsertDictValues inserts the values of arr into the dictionary (memo
+// table) without appending any indices to the array being built.
+func (b *Int8DictionaryBuilder) InsertDictValues(arr *Int8) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Uint8DictionaryBuilder is a dictionary builder for uint8 values.
+type Uint8DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Uint8DictionaryBuilder) Append(v uint8) error { return b.appendValue(v) }
+func (b *Uint8DictionaryBuilder) InsertDictValues(arr *Uint8) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Int16DictionaryBuilder is a dictionary builder for int16 values.
+type Int16DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Int16DictionaryBuilder) Append(v int16) error { return b.appendValue(v) }
+func (b *Int16DictionaryBuilder) InsertDictValues(arr *Int16) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Uint16DictionaryBuilder is a dictionary builder for uint16 values.
+type Uint16DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Uint16DictionaryBuilder) Append(v uint16) error { return b.appendValue(v) }
+func (b *Uint16DictionaryBuilder) InsertDictValues(arr *Uint16) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Int32DictionaryBuilder is a dictionary builder for int32 values.
+type Int32DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Int32DictionaryBuilder) Append(v int32) error { return b.appendValue(v) }
+func (b *Int32DictionaryBuilder) InsertDictValues(arr *Int32) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Uint32DictionaryBuilder is a dictionary builder for uint32 values.
+type Uint32DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Uint32DictionaryBuilder) Append(v uint32) error { return b.appendValue(v) }
+func (b *Uint32DictionaryBuilder) InsertDictValues(arr *Uint32) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Int64DictionaryBuilder is a dictionary builder for int64 values.
+type Int64DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Int64DictionaryBuilder) Append(v int64) error { return b.appendValue(v) }
+func (b *Int64DictionaryBuilder) InsertDictValues(arr *Int64) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+// Uint64DictionaryBuilder is a dictionary builder for uint64 values.
+type Uint64DictionaryBuilder struct {
+	dictionaryBuilder
+}
+
+func (b *Uint64DictionaryBuilder) Append(v uint64) error { return b.appendValue(v) }
+func (b *Uint64DictionaryBuilder) InsertDictValues(arr *Uint64) (err error) {
+	for _, v := range arr.values {
+		if err = b.insertDictValue(v); err != nil {
+			break
+		}
+	}
+	return
+}
+
+type DurationDictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *DurationDictionaryBuilder) Append(v arrow.Duration) error { return b.appendValue(int64(v)) }
+func (b *DurationDictionaryBuilder) InsertDictValues(arr *Duration) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int64(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type TimestampDictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *TimestampDictionaryBuilder) Append(v arrow.Timestamp) error { return b.appendValue(int64(v)) }
+func (b *TimestampDictionaryBuilder) InsertDictValues(arr *Timestamp) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int64(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Time32DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Time32DictionaryBuilder) Append(v arrow.Time32) error { return b.appendValue(int32(v)) }
+func (b *Time32DictionaryBuilder) InsertDictValues(arr *Time32) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int32(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Time64DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Time64DictionaryBuilder) Append(v arrow.Time64) error { return b.appendValue(int64(v)) }
+func (b *Time64DictionaryBuilder) InsertDictValues(arr *Time64) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int64(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Date32DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Date32DictionaryBuilder) Append(v arrow.Date32) error { return b.appendValue(int32(v)) }
+func (b *Date32DictionaryBuilder) InsertDictValues(arr *Date32) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int32(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Date64DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Date64DictionaryBuilder) Append(v arrow.Date64) error { return b.appendValue(int64(v)) }
+func (b *Date64DictionaryBuilder) InsertDictValues(arr *Date64) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int64(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type MonthIntervalDictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *MonthIntervalDictionaryBuilder) Append(v arrow.MonthInterval) error {
+ return b.appendValue(int32(v))
+}
+func (b *MonthIntervalDictionaryBuilder) InsertDictValues(arr *MonthInterval) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(int32(v)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Float16DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Float16DictionaryBuilder) Append(v float16.Num) error { return b.appendValue(v.Uint16()) }
+func (b *Float16DictionaryBuilder) InsertDictValues(arr *Float16) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(v.Uint16()); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Float32DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Float32DictionaryBuilder) Append(v float32) error { return b.appendValue(v) }
+func (b *Float32DictionaryBuilder) InsertDictValues(arr *Float32) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(v); err != nil {
+ break
+ }
+ }
+ return
+}
+
+type Float64DictionaryBuilder struct {
+ dictionaryBuilder
+}
+
+func (b *Float64DictionaryBuilder) Append(v float64) error { return b.appendValue(v) }
+func (b *Float64DictionaryBuilder) InsertDictValues(arr *Float64) (err error) {
+ for _, v := range arr.values {
+ if err = b.insertDictValue(v); err != nil {
+ break
+ }
+ }
+ return
+}
+
// BinaryDictionaryBuilder is a dictionary builder for variable-length
// binary-valued (and string-valued) dictionaries.
type BinaryDictionaryBuilder struct {
	dictionaryBuilder
}

// Append memoizes the bytes of v in the dictionary and appends the resulting
// index. A nil slice is appended as a null value.
func (b *BinaryDictionaryBuilder) Append(v []byte) error {
	if v == nil {
		b.AppendNull()
		return nil
	}

	return b.appendBytes(v)
}

// AppendString appends the string value v to the dictionary.
// NOTE(review): unlike Append, an empty string is never treated as null here.
func (b *BinaryDictionaryBuilder) AppendString(v string) error { return b.appendBytes([]byte(v)) }

// InsertDictValues inserts the values of arr into the dictionary without
// appending any indices, stopping at the first insertion error. The array's
// type must match the builder's dictionary value type.
func (b *BinaryDictionaryBuilder) InsertDictValues(arr *Binary) (err error) {
	if !arrow.TypeEqual(arr.DataType(), b.dt.ValueType) {
		return fmt.Errorf("dictionary insert type mismatch: cannot insert values of type %T to dictionary type %T", arr.DataType(), b.dt.ValueType)
	}

	for i := 0; i < arr.Len(); i++ {
		if err = b.insertDictBytes(arr.Value(i)); err != nil {
			break
		}
	}
	return
}

// InsertStringDictValues is like InsertDictValues but accepts a String array.
func (b *BinaryDictionaryBuilder) InsertStringDictValues(arr *String) (err error) {
	if !arrow.TypeEqual(arr.DataType(), b.dt.ValueType) {
		return fmt.Errorf("dictionary insert type mismatch: cannot insert values of type %T to dictionary type %T", arr.DataType(), b.dt.ValueType)
	}

	for i := 0; i < arr.Len(); i++ {
		if err = b.insertDictValue(arr.Value(i)); err != nil {
			break
		}
	}
	return
}

// GetValueIndex returns the dictionary index stored at position i of the
// index builder, or -1 if the index builder is not one of the known
// integer builder types.
func (b *BinaryDictionaryBuilder) GetValueIndex(i int) int {
	// the switch shadows the receiver with the concrete index builder
	switch b := b.idxBuilder.Builder.(type) {
	case *Uint8Builder:
		return int(b.Value(i))
	case *Int8Builder:
		return int(b.Value(i))
	case *Uint16Builder:
		return int(b.Value(i))
	case *Int16Builder:
		return int(b.Value(i))
	case *Uint32Builder:
		return int(b.Value(i))
	case *Int32Builder:
		return int(b.Value(i))
	case *Uint64Builder:
		return int(b.Value(i))
	case *Int64Builder:
		return int(b.Value(i))
	default:
		return -1
	}
}

// Value returns the i-th dictionary value as raw bytes, or nil if the
// memo table is not a binary memo table.
func (b *BinaryDictionaryBuilder) Value(i int) []byte {
	switch mt := b.memoTable.(type) {
	case *hashing.BinaryMemoTable:
		return mt.Value(i)
	}
	return nil
}

// ValueStr returns the i-th dictionary value as a string.
func (b *BinaryDictionaryBuilder) ValueStr(i int) string {
	return string(b.Value(i))
}
+
+type FixedSizeBinaryDictionaryBuilder struct {
+ dictionaryBuilder
+ byteWidth int
+}
+
+func (b *FixedSizeBinaryDictionaryBuilder) Append(v []byte) error {
+ return b.appendValue(v[:b.byteWidth])
+}
+func (b *FixedSizeBinaryDictionaryBuilder) InsertDictValues(arr *FixedSizeBinary) (err error) {
+ var (
+ beg = arr.array.data.offset * b.byteWidth
+ end = (arr.array.data.offset + arr.data.length) * b.byteWidth
+ )
+ data := arr.valueBytes[beg:end]
+ for len(data) > 0 {
+ if err = b.insertDictValue(data[:b.byteWidth]); err != nil {
+ break
+ }
+ data = data[b.byteWidth:]
+ }
+ return
+}
+
// Decimal128DictionaryBuilder is a dictionary builder for decimal128 values.
type Decimal128DictionaryBuilder struct {
	dictionaryBuilder
}

// Append memoizes v by the raw arrow.Decimal128SizeBytes bytes of the local
// copy, reinterpreted in place via unsafe.Pointer, and appends its index.
func (b *Decimal128DictionaryBuilder) Append(v decimal128.Num) error {
	return b.appendValue((*(*[arrow.Decimal128SizeBytes]byte)(unsafe.Pointer(&v)))[:])
}

// InsertDictValues inserts all values of arr into the dictionary (without
// appending indices) by walking the raw value bytes one fixed-width element
// at a time, stopping at the first insertion error.
func (b *Decimal128DictionaryBuilder) InsertDictValues(arr *Decimal128) (err error) {
	data := arrow.Decimal128Traits.CastToBytes(arr.values)
	for len(data) > 0 {
		if err = b.insertDictValue(data[:arrow.Decimal128SizeBytes]); err != nil {
			break
		}
		data = data[arrow.Decimal128SizeBytes:]
	}
	return
}

// Decimal256DictionaryBuilder is a dictionary builder for decimal256 values.
type Decimal256DictionaryBuilder struct {
	dictionaryBuilder
}

// Append memoizes v by the raw arrow.Decimal256SizeBytes bytes of the local
// copy, reinterpreted in place via unsafe.Pointer, and appends its index.
func (b *Decimal256DictionaryBuilder) Append(v decimal256.Num) error {
	return b.appendValue((*(*[arrow.Decimal256SizeBytes]byte)(unsafe.Pointer(&v)))[:])
}

// InsertDictValues inserts all values of arr into the dictionary (without
// appending indices) by walking the raw value bytes one fixed-width element
// at a time, stopping at the first insertion error.
func (b *Decimal256DictionaryBuilder) InsertDictValues(arr *Decimal256) (err error) {
	data := arrow.Decimal256Traits.CastToBytes(arr.values)
	for len(data) > 0 {
		if err = b.insertDictValue(data[:arrow.Decimal256SizeBytes]); err != nil {
			break
		}
		data = data[arrow.Decimal256SizeBytes:]
	}
	return
}

// MonthDayNanoDictionaryBuilder is a dictionary builder for
// arrow.MonthDayNanoInterval values.
type MonthDayNanoDictionaryBuilder struct {
	dictionaryBuilder
}

// Append memoizes v by its raw fixed-width byte representation (via
// unsafe.Pointer on the local copy) and appends its index.
func (b *MonthDayNanoDictionaryBuilder) Append(v arrow.MonthDayNanoInterval) error {
	return b.appendValue((*(*[arrow.MonthDayNanoIntervalSizeBytes]byte)(unsafe.Pointer(&v)))[:])
}

// InsertDictValues inserts all values of arr into the dictionary (without
// appending indices) by walking the raw value bytes one fixed-width element
// at a time, stopping at the first insertion error.
func (b *MonthDayNanoDictionaryBuilder) InsertDictValues(arr *MonthDayNanoInterval) (err error) {
	data := arrow.MonthDayNanoIntervalTraits.CastToBytes(arr.values)
	for len(data) > 0 {
		if err = b.insertDictValue(data[:arrow.MonthDayNanoIntervalSizeBytes]); err != nil {
			break
		}
		data = data[arrow.MonthDayNanoIntervalSizeBytes:]
	}
	return
}

// DayTimeDictionaryBuilder is a dictionary builder for
// arrow.DayTimeInterval values.
type DayTimeDictionaryBuilder struct {
	dictionaryBuilder
}

// Append memoizes v by its raw fixed-width byte representation (via
// unsafe.Pointer on the local copy) and appends its index.
func (b *DayTimeDictionaryBuilder) Append(v arrow.DayTimeInterval) error {
	return b.appendValue((*(*[arrow.DayTimeIntervalSizeBytes]byte)(unsafe.Pointer(&v)))[:])
}

// InsertDictValues inserts all values of arr into the dictionary (without
// appending indices) by walking the raw value bytes one fixed-width element
// at a time, stopping at the first insertion error.
func (b *DayTimeDictionaryBuilder) InsertDictValues(arr *DayTimeInterval) (err error) {
	data := arrow.DayTimeIntervalTraits.CastToBytes(arr.values)
	for len(data) > 0 {
		if err = b.insertDictValue(data[:arrow.DayTimeIntervalSizeBytes]); err != nil {
			break
		}
		data = data[arrow.DayTimeIntervalSizeBytes:]
	}
	return
}
+
// IsTrivialTransposition reports whether transposeMap is the identity
// mapping (every index maps to itself), in which case applying it would
// be a no-op.
func IsTrivialTransposition(transposeMap []int32) bool {
	for i := range transposeMap {
		if transposeMap[i] != int32(i) {
			return false
		}
	}
	return true
}
+
// TransposeDictIndices returns a new ArrayData equivalent to data but with its
// dictionary indices remapped through transposeMap and the provided dictionary
// attached. inType and outType must both be dictionary types; when the index
// types match and transposeMap is the identity, the input buffers are reused
// without copying.
func TransposeDictIndices(mem memory.Allocator, data arrow.ArrayData, inType, outType arrow.DataType, dict arrow.ArrayData, transposeMap []int32) (arrow.ArrayData, error) {
	// inType may be different from data->dtype if data is ExtensionType
	if inType.ID() != arrow.DICTIONARY || outType.ID() != arrow.DICTIONARY {
		return nil, errors.New("arrow/array: expected dictionary type")
	}

	var (
		inDictType   = inType.(*arrow.DictionaryType)
		outDictType  = outType.(*arrow.DictionaryType)
		inIndexType  = inDictType.IndexType
		outIndexType = outDictType.IndexType.(arrow.FixedWidthDataType)
	)

	if inIndexType.ID() == outIndexType.ID() && IsTrivialTransposition(transposeMap) {
		// index type and values will be identical, we can reuse the existing buffers
		return NewDataWithDictionary(outType, data.Len(), []*memory.Buffer{data.Buffers()[0], data.Buffers()[1]},
			data.NullN(), data.Offset(), dict.(*Data)), nil
	}

	// default path: compute the transposed indices as a new buffer
	outBuf := memory.NewResizableBuffer(mem)
	outBuf.Resize(data.Len() * int(bitutil.BytesForBits(int64(outIndexType.BitWidth()))))
	defer outBuf.Release()

	// shift null buffer if original offset is non-zero
	var nullBitmap *memory.Buffer
	if data.Offset() != 0 && data.NullN() != 0 {
		nullBitmap = memory.NewResizableBuffer(mem)
		nullBitmap.Resize(int(bitutil.BytesForBits(int64(data.Len()))))
		bitutil.CopyBitmap(data.Buffers()[0].Bytes(), data.Offset(), data.Len(), nullBitmap.Bytes(), 0)
		defer nullBitmap.Release()
	} else {
		// no shift needed; reuse the incoming validity bitmap as-is
		nullBitmap = data.Buffers()[0]
	}

	outData := NewDataWithDictionary(outType, data.Len(),
		[]*memory.Buffer{nullBitmap, outBuf}, data.NullN(), 0, dict.(*Data))
	// rewrite every index through transposeMap into the freshly sized buffer
	err := utils.TransposeIntsBuffers(inIndexType, outIndexType,
		data.Buffers()[1].Bytes(), outBuf.Bytes(), data.Offset(), outData.offset, data.Len(), transposeMap)
	return outData, err
}
+
// DictionaryUnifier defines the interface used for unifying, and optionally producing
// transposition maps for, multiple dictionary arrays incrementally.
type DictionaryUnifier interface {
	// Unify adds the provided array of dictionary values to be unified.
	Unify(arrow.Array) error
	// UnifyAndTranspose adds the provided array of dictionary values,
	// just like Unify but returns an allocated buffer containing a mapping
	// to transpose dictionary indices.
	UnifyAndTranspose(dict arrow.Array) (transposed *memory.Buffer, err error)
	// GetResult returns the dictionary type (choosing the smallest index type
	// that can represent all the values) and the new unified dictionary.
	//
	// Calling GetResult clears the existing dictionary from the unifier so it
	// can be reused by calling Unify/UnifyAndTranspose again with new arrays.
	GetResult() (outType arrow.DataType, outDict arrow.Array, err error)
	// GetResultWithIndexType is like GetResult, but allows specifying the type
	// of the dictionary indexes rather than letting the unifier pick. If the
	// passed in index type isn't large enough to represent all of the dictionary
	// values, an error will be returned instead. The new unified dictionary
	// is returned.
	GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error)
	// Release should be called to clean up any allocated scratch memo-table used
	// for building the unified dictionary.
	Release()
}
+
// unifier is the generic DictionaryUnifier implementation, backed by a
// memo table created for the dictionary's value type.
type unifier struct {
	mem       memory.Allocator
	valueType arrow.DataType
	memoTable hashing.MemoTable
}

// NewDictionaryUnifier constructs and returns a new dictionary unifier for dictionaries
// of valueType, using the provided allocator for allocating the unified dictionary
// and the memotable used for building it.
//
// This will only work for non-nested types currently. a nested valueType or dictionary type
// will result in an error.
func NewDictionaryUnifier(alloc memory.Allocator, valueType arrow.DataType) (DictionaryUnifier, error) {
	memoTable, err := createMemoTable(alloc, valueType)
	if err != nil {
		return nil, err
	}
	return &unifier{
		mem:       alloc,
		valueType: valueType,
		memoTable: memoTable,
	}, nil
}
+
+func (u *unifier) Release() {
+ if bin, ok := u.memoTable.(*hashing.BinaryMemoTable); ok {
+ bin.Release()
+ }
+}
+
// Unify memoizes every value of dict (including nulls) into the unified
// dictionary. dict's type must equal the unifier's value type.
func (u *unifier) Unify(dict arrow.Array) (err error) {
	if !arrow.TypeEqual(u.valueType, dict.DataType()) {
		return fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), u.valueType)
	}

	// valFn is a per-type accessor returning the i-th value of dict
	valFn := getvalFn(dict)
	for i := 0; i < dict.Len(); i++ {
		if dict.IsNull(i) {
			// record the null in the memo table as well
			u.memoTable.GetOrInsertNull()
			continue
		}

		if _, _, err = u.memoTable.GetOrInsert(valFn(i)); err != nil {
			return err
		}
	}
	return
}
+
// UnifyAndTranspose memoizes every value of dict like Unify, and additionally
// returns a buffer of int32 indices mapping each of dict's positions to its
// position in the unified dictionary. The caller owns the returned buffer;
// it is released here only on error.
func (u *unifier) UnifyAndTranspose(dict arrow.Array) (transposed *memory.Buffer, err error) {
	if !arrow.TypeEqual(u.valueType, dict.DataType()) {
		return nil, fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), u.valueType)
	}

	transposed = memory.NewResizableBuffer(u.mem)
	transposed.Resize(arrow.Int32Traits.BytesRequired(dict.Len()))

	// view the buffer as []int32 and fill it with the memoized indices
	newIdxes := arrow.Int32Traits.CastFromBytes(transposed.Bytes())
	valFn := getvalFn(dict)
	for i := 0; i < dict.Len(); i++ {
		if dict.IsNull(i) {
			idx, _ := u.memoTable.GetOrInsertNull()
			newIdxes[i] = int32(idx)
			continue
		}

		idx, _, err := u.memoTable.GetOrInsert(valFn(i))
		if err != nil {
			transposed.Release()
			return nil, err
		}
		newIdxes[i] = int32(idx)
	}
	return
}
+
// GetResult returns a dictionary type using the smallest signed integer index
// type able to represent all memoized values, plus the unified dictionary
// array. The memo table is reset so the unifier can be reused.
func (u *unifier) GetResult() (outType arrow.DataType, outDict arrow.Array, err error) {
	dictLen := u.memoTable.Size()
	var indexType arrow.DataType
	switch {
	case dictLen <= math.MaxInt8:
		indexType = arrow.PrimitiveTypes.Int8
	case dictLen <= math.MaxInt16:
		indexType = arrow.PrimitiveTypes.Int16
	case dictLen <= math.MaxInt32:
		indexType = arrow.PrimitiveTypes.Int32
	default:
		indexType = arrow.PrimitiveTypes.Int64
	}
	outType = &arrow.DictionaryType{IndexType: indexType, ValueType: u.valueType}

	dictData, err := GetDictArrayData(u.mem, u.valueType, u.memoTable, 0)
	if err != nil {
		return nil, nil, err
	}

	u.memoTable.Reset()

	// NOTE(review): assumes MakeFromData retains dictData, so the deferred
	// Release drops only this function's reference — confirm against MakeFromData.
	defer dictData.Release()
	outDict = MakeFromData(dictData)
	return
}
+
// GetResultWithIndexType is like GetResult but uses the caller-provided
// integer index type, returning an error if that type cannot represent all
// memoized values. The memo table is reset on success.
func (u *unifier) GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error) {
	dictLen := u.memoTable.Size()
	var toobig bool
	switch indexType.ID() {
	case arrow.UINT8:
		toobig = dictLen > math.MaxUint8
	case arrow.INT8:
		toobig = dictLen > math.MaxInt8
	case arrow.UINT16:
		toobig = dictLen > math.MaxUint16
	case arrow.INT16:
		toobig = dictLen > math.MaxInt16
	case arrow.UINT32:
		// NOTE(review): uint is 32 bits on 32-bit platforms, which makes this
		// comparison always false there — confirm whether that is intended.
		toobig = uint(dictLen) > math.MaxUint32
	case arrow.INT32:
		toobig = dictLen > math.MaxInt32
	case arrow.UINT64:
		// always false: an int value can never exceed MaxUint64 (kept for symmetry)
		toobig = uint64(dictLen) > uint64(math.MaxUint64)
	case arrow.INT64:
		// any int value fits in int64
	default:
		return nil, fmt.Errorf("arrow/array: invalid dictionary index type: %s, must be integral", indexType)
	}
	if toobig {
		return nil, errors.New("arrow/array: cannot combine dictionaries. unified dictionary requires a larger index type")
	}

	dictData, err := GetDictArrayData(u.mem, u.valueType, u.memoTable, 0)
	if err != nil {
		return nil, err
	}

	u.memoTable.Reset()

	// release this function's reference; the returned array holds its own
	defer dictData.Release()
	return MakeFromData(dictData), nil
}
+
// binaryUnifier is a DictionaryUnifier specialized for Binary-valued
// dictionaries, using the binary memo table's byte-oriented API directly.
type binaryUnifier struct {
	mem       memory.Allocator
	memoTable *hashing.BinaryMemoTable
}

// NewBinaryDictionaryUnifier constructs and returns a new dictionary unifier for dictionaries
// of binary values, using the provided allocator for allocating the unified dictionary
// and the memotable used for building it.
func NewBinaryDictionaryUnifier(alloc memory.Allocator) DictionaryUnifier {
	return &binaryUnifier{
		mem:       alloc,
		memoTable: hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(alloc, arrow.BinaryTypes.Binary)),
	}
}

// Release frees the scratch memo table used for building the unified dictionary.
func (u *binaryUnifier) Release() {
	u.memoTable.Release()
}
+
// Unify memoizes every value of dict (including nulls) into the unified
// dictionary. dict must be a Binary array.
func (u *binaryUnifier) Unify(dict arrow.Array) (err error) {
	if !arrow.TypeEqual(arrow.BinaryTypes.Binary, dict.DataType()) {
		return fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), arrow.BinaryTypes.Binary)
	}

	typedDict := dict.(*Binary)
	for i := 0; i < dict.Len(); i++ {
		if dict.IsNull(i) {
			// record the null in the memo table as well
			u.memoTable.GetOrInsertNull()
			continue
		}

		if _, _, err = u.memoTable.GetOrInsertBytes(typedDict.Value(i)); err != nil {
			return err
		}
	}
	return
}
+
// UnifyAndTranspose memoizes every value of dict like Unify, and additionally
// returns a buffer of int32 indices mapping each of dict's positions to its
// position in the unified dictionary. The caller owns the returned buffer;
// it is released here only on error.
func (u *binaryUnifier) UnifyAndTranspose(dict arrow.Array) (transposed *memory.Buffer, err error) {
	if !arrow.TypeEqual(arrow.BinaryTypes.Binary, dict.DataType()) {
		return nil, fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), arrow.BinaryTypes.Binary)
	}

	transposed = memory.NewResizableBuffer(u.mem)
	transposed.Resize(arrow.Int32Traits.BytesRequired(dict.Len()))

	// view the buffer as []int32 and fill it with the memoized indices
	newIdxes := arrow.Int32Traits.CastFromBytes(transposed.Bytes())
	typedDict := dict.(*Binary)
	for i := 0; i < dict.Len(); i++ {
		if dict.IsNull(i) {
			idx, _ := u.memoTable.GetOrInsertNull()
			newIdxes[i] = int32(idx)
			continue
		}

		idx, _, err := u.memoTable.GetOrInsertBytes(typedDict.Value(i))
		if err != nil {
			transposed.Release()
			return nil, err
		}
		newIdxes[i] = int32(idx)
	}
	return
}
+
// GetResult returns a dictionary type using the smallest signed integer index
// type able to represent all memoized values, plus the unified Binary
// dictionary array. The memo table is reset so the unifier can be reused.
func (u *binaryUnifier) GetResult() (outType arrow.DataType, outDict arrow.Array, err error) {
	dictLen := u.memoTable.Size()
	var indexType arrow.DataType
	switch {
	case dictLen <= math.MaxInt8:
		indexType = arrow.PrimitiveTypes.Int8
	case dictLen <= math.MaxInt16:
		indexType = arrow.PrimitiveTypes.Int16
	case dictLen <= math.MaxInt32:
		indexType = arrow.PrimitiveTypes.Int32
	default:
		indexType = arrow.PrimitiveTypes.Int64
	}
	outType = &arrow.DictionaryType{IndexType: indexType, ValueType: arrow.BinaryTypes.Binary}

	dictData, err := GetDictArrayData(u.mem, arrow.BinaryTypes.Binary, u.memoTable, 0)
	if err != nil {
		return nil, nil, err
	}

	u.memoTable.Reset()

	// release this function's reference; the returned array holds its own
	defer dictData.Release()
	outDict = MakeFromData(dictData)
	return
}
+
// GetResultWithIndexType is like GetResult but uses the caller-provided
// integer index type, returning an error if that type cannot represent all
// memoized values. The memo table is reset on success.
func (u *binaryUnifier) GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error) {
	dictLen := u.memoTable.Size()
	var toobig bool
	switch indexType.ID() {
	case arrow.UINT8:
		toobig = dictLen > math.MaxUint8
	case arrow.INT8:
		toobig = dictLen > math.MaxInt8
	case arrow.UINT16:
		toobig = dictLen > math.MaxUint16
	case arrow.INT16:
		toobig = dictLen > math.MaxInt16
	case arrow.UINT32:
		// NOTE(review): uint is 32 bits on 32-bit platforms, which makes this
		// comparison always false there — confirm whether that is intended.
		toobig = uint(dictLen) > math.MaxUint32
	case arrow.INT32:
		toobig = dictLen > math.MaxInt32
	case arrow.UINT64:
		// always false: an int value can never exceed MaxUint64 (kept for symmetry)
		toobig = uint64(dictLen) > uint64(math.MaxUint64)
	case arrow.INT64:
		// any int value fits in int64
	default:
		return nil, fmt.Errorf("arrow/array: invalid dictionary index type: %s, must be integral", indexType)
	}
	if toobig {
		return nil, errors.New("arrow/array: cannot combine dictionaries. unified dictionary requires a larger index type")
	}

	dictData, err := GetDictArrayData(u.mem, arrow.BinaryTypes.Binary, u.memoTable, 0)
	if err != nil {
		return nil, err
	}

	u.memoTable.Reset()

	// release this function's reference; the returned array holds its own
	defer dictData.Release()
	return MakeFromData(dictData), nil
}
+
// unifyRecursive walks typ (descending into nested children first) and unifies
// the dictionaries of all dictionary-typed data across chunks in place,
// reporting whether any chunk was actually modified.
func unifyRecursive(mem memory.Allocator, typ arrow.DataType, chunks []*Data) (changed bool, err error) {
	debug.Assert(len(chunks) != 0, "must provide non-zero length chunk slice")
	var extType arrow.DataType

	if typ.ID() == arrow.EXTENSION {
		// operate on the storage type, remembering the extension type so it
		// can be restored on the rebuilt chunks below
		extType = typ
		typ = typ.(arrow.ExtensionType).StorageType()
	}

	if nestedTyp, ok := typ.(arrow.NestedType); ok {
		// recurse into each child field across every chunk
		children := make([]*Data, len(chunks))
		for i, f := range nestedTyp.Fields() {
			for j, c := range chunks {
				children[j] = c.childData[i].(*Data)
			}

			childChanged, err := unifyRecursive(mem, f.Type, children)
			if err != nil {
				return false, err
			}
			if childChanged {
				// only when unification actually occurs
				for j := range chunks {
					chunks[j].childData[i] = children[j]
				}
				changed = true
			}
		}
	}

	if typ.ID() == arrow.DICTIONARY {
		dictType := typ.(*arrow.DictionaryType)
		var (
			uni     DictionaryUnifier
			newDict arrow.Array
		)
		// unify any nested dictionaries first, but the unifier doesn't support
		// nested dictionaries yet so this would fail.
		uni, err = NewDictionaryUnifier(mem, dictType.ValueType)
		if err != nil {
			return changed, err
		}
		defer uni.Release()
		transposeMaps := make([]*memory.Buffer, len(chunks))
		for i, c := range chunks {
			debug.Assert(c.dictionary != nil, "missing dictionary data for dictionary array")
			arr := MakeFromData(c.dictionary)
			// NOTE(review): defers inside this loop run only at function return,
			// keeping every chunk's dictionary wrapper and transpose buffer
			// alive until then — acceptable for typical chunk counts.
			defer arr.Release()
			if transposeMaps[i], err = uni.UnifyAndTranspose(arr); err != nil {
				return
			}
			defer transposeMaps[i].Release()
		}

		if newDict, err = uni.GetResultWithIndexType(dictType.IndexType); err != nil {
			return
		}
		defer newDict.Release()

		for j := range chunks {
			// rebuild each chunk with indices remapped onto the unified dictionary
			chnk, err := TransposeDictIndices(mem, chunks[j], typ, typ, newDict.Data(), arrow.Int32Traits.CastFromBytes(transposeMaps[j].Bytes()))
			if err != nil {
				return changed, err
			}
			chunks[j].Release()
			chunks[j] = chnk.(*Data)
			if extType != nil {
				// restore the original extension type on the rebuilt chunk
				chunks[j].dtype = extType
			}
		}
		changed = true
	}

	return
}
+
// UnifyChunkedDicts takes a chunked array of dictionary type and will unify
// the dictionary across all of the chunks with the returned chunked array
// having all chunks share the same dictionary.
//
// The return from this *must* have Release called on it unless an error is returned
// in which case the *arrow.Chunked will be nil.
//
// If there is 1 or fewer chunks, then nothing is modified and this function will just
// call Retain on the passed in Chunked array (so Release can safely be called on it).
// The same is true if the type of the array is not a dictionary or if no changes are
// needed for all of the chunks to be using the same dictionary.
func UnifyChunkedDicts(alloc memory.Allocator, chnkd *arrow.Chunked) (*arrow.Chunked, error) {
	if len(chnkd.Chunks()) <= 1 {
		// nothing to unify; hand back an extra reference to the input
		chnkd.Retain()
		return chnkd, nil
	}

	// take a reference to each chunk's data so unifyRecursive may mutate it
	chunksData := make([]*Data, len(chnkd.Chunks()))
	for i, c := range chnkd.Chunks() {
		c.Data().Retain()
		chunksData[i] = c.Data().(*Data)
	}
	changed, err := unifyRecursive(alloc, chnkd.DataType(), chunksData)
	if err != nil || !changed {
		// drop our references; on error return nil, otherwise return the
		// original with an extra reference as documented above
		for _, c := range chunksData {
			c.Release()
		}
		if err == nil {
			chnkd.Retain()
		} else {
			chnkd = nil
		}
		return chnkd, err
	}

	// wrap the (now unified) data back into arrays for the new Chunked
	chunks := make([]arrow.Array, len(chunksData))
	for i, c := range chunksData {
		chunks[i] = MakeFromData(c)
		defer chunks[i].Release()
		c.Release()
	}

	return arrow.NewChunked(chnkd.DataType(), chunks), nil
}
+
// UnifyTableDicts performs UnifyChunkedDicts on each column of the table so that
// any dictionary column will have the dictionaries of its chunks unified.
//
// The returned Table should always be Release'd unless a non-nil error was returned,
// in which case the table returned will be nil.
func UnifyTableDicts(alloc memory.Allocator, table arrow.Table) (arrow.Table, error) {
	cols := make([]arrow.Column, table.NumCols())
	for i := 0; i < int(table.NumCols()); i++ {
		chnkd, err := UnifyChunkedDicts(alloc, table.Column(i).Data())
		if err != nil {
			return nil, err
		}
		// NOTE(review): these defers run at function return, after NewTable
		// below has taken its own references to the columns.
		defer chnkd.Release()
		cols[i] = *arrow.NewColumn(table.Schema().Field(i), chnkd)
		defer cols[i].Release()
	}
	return NewTable(table.Schema(), cols, table.NumRows()), nil
}
+
// compile-time checks that Dictionary satisfies arrow.Array and
// dictionaryBuilder satisfies Builder
var (
	_ arrow.Array = (*Dictionary)(nil)
	_ Builder     = (*dictionaryBuilder)(nil)
)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go
new file mode 100644
index 000000000..026a27b98
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go
@@ -0,0 +1,315 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow"
+)
+
// Edit represents one entry in the edit script to compare two arrays.
type Edit struct {
	// Insert is true when an element was inserted into base, false when
	// one was deleted from it.
	Insert bool
	// RunLength is the number of unchanged elements following this
	// insertion/deletion.
	RunLength int64
}

// Edits is a slice of Edit structs that represents an edit script to compare two arrays.
// When applied to the base array, it produces the target array.
// Each element of "insert" determines whether an element was inserted into (true)
// or deleted from (false) base. Each insertion or deletion is followed by a run of
// elements which are unchanged from base to target; the length of this run is stored
// in RunLength. (Note that the edit script begins and ends with a run of shared
// elements but both fields of the struct must have the same length. To accommodate this
// the first element of "insert" should be ignored.)
//
// For example for base "hlloo" and target "hello", the edit script would be
// [
//
//	{"insert": false, "run_length": 1}, // leading run of length 1 ("h")
//	{"insert": true, "run_length": 3}, // insert("e") then a run of length 3 ("llo")
//	{"insert": false, "run_length": 0} // delete("o") then an empty run
//
// ]
type Edits []Edit

// String returns a simple string representation of the edit script.
func (e Edits) String() string {
	return fmt.Sprintf("%v", []Edit(e))
}
+
// UnifiedDiff returns a string representation of the diff of base and target in Unified Diff format.
func (e Edits) UnifiedDiff(base, target arrow.Array) string {
	var s strings.Builder
	baseIndex := int64(0)
	targetIndex := int64(0)
	wrotePosition := false
	for i := 0; i < len(e); i++ {
		// the first edit entry carries no insertion/deletion (see Edits doc)
		if i > 0 {
			// emit one hunk header per run of consecutive edits
			if !wrotePosition {
				s.WriteString(fmt.Sprintf("@@ -%d, +%d @@\n", baseIndex, targetIndex))
				wrotePosition = true
			}
			if e[i].Insert {
				s.WriteString(fmt.Sprintf("+%v\n", stringAt(target, targetIndex)))
				targetIndex++
			} else {
				s.WriteString(fmt.Sprintf("-%v\n", stringAt(base, baseIndex)))
				baseIndex++
			}
		}
		// advance both cursors past the run of shared elements
		for j := int64(0); j < e[i].RunLength; j++ {
			baseIndex++
			targetIndex++
			wrotePosition = false
		}
	}
	return s.String()
}
+
// stringAt renders element i of arr for diff output: "null" for null slots,
// explicit formatting for float32/float64, date32/date64, and
// second/milli/micro/nano timestamps, and otherwise the JSON encoding of a
// one-element slice with the surrounding brackets stripped.
func stringAt(arr arrow.Array, i int64) string {
	if arr.IsNull(int(i)) {
		return "null"
	}
	dt := arr.DataType()
	switch {
	case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Float32):
		return fmt.Sprintf("%f", arr.(*Float32).Value(int(i)))
	case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Float64):
		return fmt.Sprintf("%f", arr.(*Float64).Value(int(i)))
	case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Date32):
		return arr.(*Date32).Value(int(i)).FormattedString()
	case arrow.TypeEqual(dt, arrow.PrimitiveTypes.Date64):
		return arr.(*Date64).Value(int(i)).FormattedString()
	case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_s):
		return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Second).String()
	case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_ms):
		return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Millisecond).String()
	case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_us):
		return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Microsecond).String()
	case arrow.TypeEqual(dt, arrow.FixedWidthTypes.Timestamp_ns):
		return arr.(*Timestamp).Value(int(i)).ToTime(arrow.Nanosecond).String()
	}
	// fallback: marshal a single-element slice and strip the JSON brackets
	s := NewSlice(arr, i, i+1)
	defer s.Release()
	st, _ := s.MarshalJSON()
	return strings.Trim(string(st[1:len(st)-1]), "\n")
}
+
// Diff compares two arrays, returning an edit script which expresses the difference
// between them. The edit script can be applied to the base array to produce the target.
// 'base' is a baseline for comparison.
// 'target' is an array of identical type to base whose elements differ from base's.
func Diff(base, target arrow.Array) (edits Edits, err error) {
	if !arrow.TypeEqual(base.DataType(), target.DataType()) {
		return nil, fmt.Errorf("%w: only taking the diff of like-typed arrays is supported", arrow.ErrNotImplemented)
	}
	switch base.DataType().ID() {
	case arrow.EXTENSION:
		// extension arrays are diffed by their underlying storage
		return Diff(base.(ExtensionArray).Storage(), target.(ExtensionArray).Storage())
	case arrow.DICTIONARY:
		return nil, fmt.Errorf("%w: diffing arrays of type %s is not implemented", arrow.ErrNotImplemented, base.DataType())
	case arrow.RUN_END_ENCODED:
		return nil, fmt.Errorf("%w: diffing arrays of type %s is not implemented", arrow.ErrNotImplemented, base.DataType())
	}
	d := newQuadraticSpaceMyersDiff(base, target)
	return d.Diff()
}
+
// editPoint represents an intermediate state in the comparison of two arrays
type editPoint struct {
	base   int // position reached within base
	target int // position reached within target
}

// quadraticSpaceMyersDiff computes an edit script between two arrays using a
// Myers-style shortest-edit-script search, storing per-edit-count state in
// triangular slices (hence quadratic space; see storageOffset).
type quadraticSpaceMyersDiff struct {
	base         arrow.Array
	target       arrow.Array
	finishIndex  int // index of the finishing edit point; -1 until found
	editCount    int
	endpointBase []int  // furthest-reaching base positions per edit count
	insert       []bool // whether each stored edit was an insertion
	baseBegin    int
	targetBegin  int
	baseEnd      int
	targetEnd    int
}
+
// newQuadraticSpaceMyersDiff initializes the diff state for base and target,
// seeding the search by extending from the start of both arrays; if they are
// equal end to end the diff is already finished (finishIndex = 0).
func newQuadraticSpaceMyersDiff(base, target arrow.Array) *quadraticSpaceMyersDiff {
	d := &quadraticSpaceMyersDiff{
		base:         base,
		target:       target,
		finishIndex:  -1,
		editCount:    0,
		endpointBase: []int{},
		insert:       []bool{},
		baseBegin:    0,
		targetBegin:  0,
		baseEnd:      base.Len(),
		targetEnd:    target.Len(),
	}
	// seed with the furthest base position reachable with zero edits
	d.endpointBase = []int{d.extendFrom(editPoint{d.baseBegin, d.targetBegin}).base}
	if d.baseEnd-d.baseBegin == d.targetEnd-d.targetBegin && d.endpointBase[0] == d.baseEnd {
		// trivial case: base == target
		d.finishIndex = 0
	}
	return d
}
+
// valuesEqual reports whether base[baseIndex] equals target[targetIndex],
// treating two nulls as equal and a null/non-null pair as unequal.
func (d *quadraticSpaceMyersDiff) valuesEqual(baseIndex, targetIndex int) bool {
	baseNull := d.base.IsNull(baseIndex)
	targetNull := d.target.IsNull(targetIndex)
	if baseNull || targetNull {
		return baseNull && targetNull
	}
	return SliceEqual(d.base, int64(baseIndex), int64(baseIndex+1), d.target, int64(targetIndex), int64(targetIndex+1))
}

// increment the position within base and target (the elements skipped in this way were
// present in both sequences)
func (d *quadraticSpaceMyersDiff) extendFrom(p editPoint) editPoint {
	for p.base != d.baseEnd && p.target != d.targetEnd {
		if !d.valuesEqual(p.base, p.target) {
			break
		}
		p.base++
		p.target++
	}
	return p
}

// increment the position within base (the element pointed to was deleted)
// then extend maximally
func (d *quadraticSpaceMyersDiff) deleteOne(p editPoint) editPoint {
	if p.base != d.baseEnd {
		p.base++
	}
	return d.extendFrom(p)
}

// increment the position within target (the element pointed to was inserted)
// then extend maximally
func (d *quadraticSpaceMyersDiff) insertOne(p editPoint) editPoint {
	if p.target != d.targetEnd {
		p.target++
	}
	return d.extendFrom(p)
}
+
+// beginning of a range for storing per-edit state in endpointBase and insert.
+// Edit count k owns the half-open index range
+// [storageOffset(k), storageOffset(k+1)), i.e. k+1 slots (triangular layout).
+func storageOffset(editCount int) int {
+	return editCount * (editCount + 1) / 2
+}
+
+// given edit_count and index, augment endpointBase[index] with the corresponding
+// position in target (which is only implicitly represented in editCount, index)
+func (d *quadraticSpaceMyersDiff) getEditPoint(editCount, index int) editPoint {
+	// index position within this edit count's range encodes how many of the
+	// edits were insertions vs deletions
+	insertionsMinusDeletions := 2*(index-storageOffset(editCount)) - editCount
+	maximalBase := d.endpointBase[index]
+	maximalTarget := min(d.targetBegin+((maximalBase-d.baseBegin)+insertionsMinusDeletions), d.targetEnd)
+	return editPoint{maximalBase, maximalTarget}
+}
+
+// Next advances the diff by one edit: it grows the triangular state storage
+// to hold editCount+1 endpoints, computes the furthest-reaching endpoint for
+// each insertion/deletion mix (deletion first, then improved by insertion
+// where that reaches at least as far), and records finishIndex when an
+// endpoint reaches the end of both arrays.
+func (d *quadraticSpaceMyersDiff) Next() {
+	d.editCount++
+	if len(d.endpointBase) < storageOffset(d.editCount+1) {
+		d.endpointBase = append(d.endpointBase, make([]int, storageOffset(d.editCount+1)-len(d.endpointBase))...)
+	}
+	if len(d.insert) < storageOffset(d.editCount+1) {
+		d.insert = append(d.insert, make([]bool, storageOffset(d.editCount+1)-len(d.insert))...)
+	}
+	previousOffset := storageOffset(d.editCount - 1)
+	currentOffset := storageOffset(d.editCount)
+
+	// try deleting from base first
+	for i, iOut := 0, 0; i < d.editCount; i, iOut = i+1, iOut+1 {
+		previousEndpoint := d.getEditPoint(d.editCount-1, i+previousOffset)
+		d.endpointBase[iOut+currentOffset] = d.deleteOne(previousEndpoint).base
+	}
+
+	// check if inserting from target could do better
+	for i, iOut := 0, 1; i < d.editCount; i, iOut = i+1, iOut+1 {
+		// retrieve the previously computed best endpoint for (editCount, iOut)
+		// for comparison with the best endpoint achievable with an insertion
+		endpointAfterDeletion := d.getEditPoint(d.editCount, iOut+currentOffset)
+
+		previousEndpoint := d.getEditPoint(d.editCount-1, i+previousOffset)
+		endpointAfterInsertion := d.insertOne(previousEndpoint)
+
+		if endpointAfterInsertion.base-endpointAfterDeletion.base >= 0 {
+			// insertion was more efficient; keep it and mark the insertion in insert
+			d.insert[iOut+currentOffset] = true
+			d.endpointBase[iOut+currentOffset] = endpointAfterInsertion.base
+		}
+	}
+
+	// did any endpoint of this edit count reach the end of both arrays?
+	finish := editPoint{d.baseEnd, d.targetEnd}
+	for iOut := 0; iOut < d.editCount+1; iOut++ {
+		if d.getEditPoint(d.editCount, iOut+currentOffset) == finish {
+			d.finishIndex = iOut + currentOffset
+			return
+		}
+	}
+}
+
+// Done reports whether some endpoint has reached the end of both arrays.
+func (d *quadraticSpaceMyersDiff) Done() bool {
+	return d.finishIndex != -1
+}
+
+// GetEdits reconstructs the edit script by walking backwards from the
+// finishing endpoint through the stored per-edit state. It produces
+// editCount+1 Edits: edits[0] is the initial run (never an insertion),
+// and each subsequent entry records whether it was an insertion plus the
+// length of the equal run that followed it. Panics if called before Done().
+func (d *quadraticSpaceMyersDiff) GetEdits() (Edits, error) {
+	if !d.Done() {
+		panic("GetEdits called but Done() = false")
+	}
+
+	length := d.editCount + 1
+	edits := make(Edits, length)
+	index := d.finishIndex
+	endpoint := d.getEditPoint(d.editCount, d.finishIndex)
+
+	for i := d.editCount; i > 0; i-- {
+		insert := d.insert[index]
+		edits[i].Insert = insert
+		// recover the storage index of the predecessor endpoint from the
+		// insertion/deletion balance at this endpoint
+		insertionsMinusDeletions := (endpoint.base - d.baseBegin) - (endpoint.target - d.targetBegin)
+		if insert {
+			insertionsMinusDeletions++
+		} else {
+			insertionsMinusDeletions--
+		}
+		index = (i-1-insertionsMinusDeletions)/2 + storageOffset(i-1)
+
+		// endpoint of previous edit
+		previous := d.getEditPoint(i-1, index)
+		in := 0
+		if insert {
+			in = 1
+		}
+		// run length excludes the edited element itself (a deletion advances
+		// base by one without contributing to the equal run)
+		edits[i].RunLength = int64(endpoint.base - previous.base - (1 - in))
+		endpoint = previous
+	}
+	edits[0].Insert = false
+	edits[0].RunLength = int64(endpoint.base - d.baseBegin)
+
+	return edits, nil
+}
+
+// Diff drives the algorithm to completion and returns the edit script.
+func (d *quadraticSpaceMyersDiff) Diff() (edits Edits, err error) {
+	for !d.Done() {
+		d.Next()
+	}
+	return d.GetEdits()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go
new file mode 100644
index 000000000..5cf854086
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go
@@ -0,0 +1,20 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package array provides implementations of various Arrow array types.
+*/
+package array
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go
new file mode 100644
index 000000000..bf4a942cf
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go
@@ -0,0 +1,520 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/encoded"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+// RunEndEncoded represents an array containing two children:
+// an array of int32 values defining the ends of each run of values
+// and an array of values
+// RunEndEncoded represents an array containing two children:
+// an array of int32 values defining the ends of each run of values
+// and an array of values
+type RunEndEncoded struct {
+	array
+
+	ends   arrow.Array // child 0: run-end positions (int16/int32/int64)
+	values arrow.Array // child 1: one value per run
+}
+
+// NewRunEndEncodedArray constructs a RunEndEncoded array from its two child
+// arrays. logicalLength and offset are in logical (decoded) element units;
+// the array itself carries no buffers of its own (a single nil validity slot).
+func NewRunEndEncodedArray(runEnds, values arrow.Array, logicalLength, offset int) *RunEndEncoded {
+	data := NewData(arrow.RunEndEncodedOf(runEnds.DataType(), values.DataType()), logicalLength,
+		[]*memory.Buffer{nil}, []arrow.ArrayData{runEnds.Data(), values.Data()}, 0, offset)
+	defer data.Release()
+	return NewRunEndEncodedData(data)
+}
+
+// NewRunEndEncodedData constructs a RunEndEncoded array from existing
+// ArrayData, which must have a run-end-encoded type and exactly two children.
+func NewRunEndEncodedData(data arrow.ArrayData) *RunEndEncoded {
+	r := &RunEndEncoded{}
+	r.refCount = 1
+	r.setData(data.(*Data))
+	return r
+}
+
+// Values returns the child array holding one value per run.
+func (r *RunEndEncoded) Values() arrow.Array { return r.values }
+
+// RunEndsArr returns the child array holding the run-end positions.
+func (r *RunEndEncoded) RunEndsArr() arrow.Array { return r.ends }
+
+// Retain increments the reference count of the array and both child arrays.
+func (r *RunEndEncoded) Retain() {
+	r.array.Retain()
+	r.values.Retain()
+	r.ends.Retain()
+}
+
+// Release decrements the reference count of the array and both child arrays.
+func (r *RunEndEncoded) Release() {
+	r.array.Release()
+	r.values.Release()
+	r.ends.Release()
+}
+
+// LogicalValuesArray returns an array holding the values of each
+// run, only over the range of run values inside the logical offset/length
+// range of the parent array.
+//
+// # Example
+//
+// For this array:
+//
+// RunEndEncoded: { Offset: 150, Length: 1500 }
+// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ]
+// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ]
+//
+// LogicalValuesArray will return the following array:
+//
+// [ "f", "g" ]
+//
+// This is because the offset of 150 tells it to skip the values until
+// "f" which corresponds with the logical offset (the run from 10 - 1000),
+// and stops after "g" because the length + offset goes to 1650 which is
+// within the run from 1000 - 1750, corresponding to the "g" value.
+//
+// # Note
+//
+// The return from this needs to be Released.
+func (r *RunEndEncoded) LogicalValuesArray() arrow.Array {
+ physOffset := r.GetPhysicalOffset()
+ physLength := r.GetPhysicalLength()
+ data := NewSliceData(r.data.Children()[1], int64(physOffset), int64(physOffset+physLength))
+ defer data.Release()
+ return MakeFromData(data)
+}
+
+// LogicalRunEndsArray returns an array holding the logical indexes
+// of each run end, only over the range of run end values relative
+// to the logical offset/length range of the parent array.
+//
+// For arrays with an offset, this is not a slice of the existing
+// internal run ends array. Instead a new array is created with run-ends
+// that are adjusted so the new array can have an offset of 0. As a result
+// this method can be expensive to call for an array with a non-zero offset.
+//
+// # Example
+//
+// For this array:
+//
+// RunEndEncoded: { Offset: 150, Length: 1500 }
+// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ]
+// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ]
+//
+// LogicalRunEndsArray will return the following array:
+//
+// [ 850, 1500 ]
+//
+// This is because the offset of 150 tells us to skip all run-ends less
+// than 150 (by finding the physical offset), and we adjust the run-ends
+// accordingly (1000 - 150 = 850). The logical length of the array is 1500,
+// so we know we don't want to go past the 1750 run end. Thus the last
+// run-end is determined by doing: min(1750 - 150, 1500) = 1500.
+//
+// # Note
+//
+// The return from this needs to be Released
+func (r *RunEndEncoded) LogicalRunEndsArray(mem memory.Allocator) arrow.Array {
+ physOffset := r.GetPhysicalOffset()
+ physLength := r.GetPhysicalLength()
+
+ if r.data.offset == 0 {
+ data := NewSliceData(r.data.childData[0], 0, int64(physLength))
+ defer data.Release()
+ return MakeFromData(data)
+ }
+
+ bldr := NewBuilder(mem, r.data.childData[0].DataType())
+ defer bldr.Release()
+ bldr.Resize(physLength)
+
+ switch e := r.ends.(type) {
+ case *Int16:
+ for _, v := range e.Int16Values()[physOffset : physOffset+physLength] {
+ v -= int16(r.data.offset)
+ v = int16(utils.MinInt(int(v), r.data.length))
+ bldr.(*Int16Builder).Append(v)
+ }
+ case *Int32:
+ for _, v := range e.Int32Values()[physOffset : physOffset+physLength] {
+ v -= int32(r.data.offset)
+ v = int32(utils.MinInt(int(v), r.data.length))
+ bldr.(*Int32Builder).Append(v)
+ }
+ case *Int64:
+ for _, v := range e.Int64Values()[physOffset : physOffset+physLength] {
+ v -= int64(r.data.offset)
+ v = int64(utils.MinInt(int(v), r.data.length))
+ bldr.(*Int64Builder).Append(v)
+ }
+ }
+
+ return bldr.NewArray()
+}
+
+// setData validates and installs the underlying data: there must be exactly
+// two children, the run-ends child must be int16/int32/int64 and contain no
+// nulls. Panics (wrapping arrow.ErrInvalid) on violation.
+func (r *RunEndEncoded) setData(data *Data) {
+	if len(data.childData) != 2 {
+		panic(fmt.Errorf("%w: arrow/array: RLE array must have exactly 2 children", arrow.ErrInvalid))
+	}
+	debug.Assert(data.dtype.ID() == arrow.RUN_END_ENCODED, "invalid type for RunLengthEncoded")
+	if !data.dtype.(*arrow.RunEndEncodedType).ValidRunEndsType(data.childData[0].DataType()) {
+		panic(fmt.Errorf("%w: arrow/array: run ends array must be int16, int32, or int64", arrow.ErrInvalid))
+	}
+	if data.childData[0].NullN() > 0 {
+		panic(fmt.Errorf("%w: arrow/array: run ends array cannot contain nulls", arrow.ErrInvalid))
+	}
+
+	r.array.setData(data)
+
+	// materialize typed child arrays for direct access
+	r.ends = MakeFromData(r.data.childData[0])
+	r.values = MakeFromData(r.data.childData[1])
+}
+
+func (r *RunEndEncoded) GetPhysicalOffset() int {
+ return encoded.FindPhysicalOffset(r.data)
+}
+
+func (r *RunEndEncoded) GetPhysicalLength() int {
+ return encoded.GetPhysicalLength(r.data)
+}
+
+// GetPhysicalIndex can be used to get the run-encoded value instead of costly LogicalValuesArray
+// in the following way:
+//
+// r.Values().(valuetype).Value(r.GetPhysicalIndex(i))
+func (r *RunEndEncoded) GetPhysicalIndex(i int) int {
+ return encoded.FindPhysicalIndex(r.data, i+r.data.offset)
+}
+
+// ValueStr will return the str representation of the value at the logical offset i.
+func (r *RunEndEncoded) ValueStr(i int) string {
+ return r.values.ValueStr(r.GetPhysicalIndex(i))
+}
+
+// String renders the physical runs as "[{runEnd -> value},...]" pairs,
+// iterating the full run-ends child (not just the logical window).
+func (r *RunEndEncoded) String() string {
+	var buf bytes.Buffer
+	buf.WriteByte('[')
+	for i := 0; i < r.ends.Len(); i++ {
+		if i != 0 {
+			buf.WriteByte(',')
+		}
+
+		// unwrap raw JSON so strings print without extra quoting artifacts
+		value := r.values.GetOneForMarshal(i)
+		if byts, ok := value.(json.RawMessage); ok {
+			value = string(byts)
+		}
+		fmt.Fprintf(&buf, "{%d -> %v}", r.ends.GetOneForMarshal(i), value)
+	}
+
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+func (r *RunEndEncoded) GetOneForMarshal(i int) interface{} {
+ return r.values.GetOneForMarshal(r.GetPhysicalIndex(i))
+}
+
+func (r *RunEndEncoded) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+ buf.WriteByte('[')
+ for i := 0; i < r.Len(); i++ {
+ if i != 0 {
+ buf.WriteByte(',')
+ }
+ if err := enc.Encode(r.GetOneForMarshal(i)); err != nil {
+ return nil, err
+ }
+ }
+ buf.WriteByte(']')
+ return buf.Bytes(), nil
+}
+
+// arrayRunEndEncodedEqual compares two run-end-encoded arrays for logical
+// equality by merging their runs and comparing the value of each merged run.
+func arrayRunEndEncodedEqual(l, r *RunEndEncoded) bool {
+	// types were already checked before getting here, so we know
+	// the encoded types are equal
+	mr := encoded.NewMergedRuns([2]arrow.Array{l, r})
+	for mr.Next() {
+		lIndex := mr.IndexIntoArray(0)
+		rIndex := mr.IndexIntoArray(1)
+		if !SliceEqual(l.values, lIndex, lIndex+1, r.values, rIndex, rIndex+1) {
+			return false
+		}
+	}
+	return true
+}
+
+// arrayRunEndEncodedApproxEqual is arrayRunEndEncodedEqual with approximate
+// (tolerance-based) comparison of the run values, controlled by opt.
+func arrayRunEndEncodedApproxEqual(l, r *RunEndEncoded, opt equalOption) bool {
+	// types were already checked before getting here, so we know
+	// the encoded types are equal
+	mr := encoded.NewMergedRuns([2]arrow.Array{l, r})
+	for mr.Next() {
+		lIndex := mr.IndexIntoArray(0)
+		rIndex := mr.IndexIntoArray(1)
+		if !sliceApproxEqual(l.values, lIndex, lIndex+1, r.values, rIndex, rIndex+1, opt) {
+			return false
+		}
+	}
+	return true
+}
+
+// RunEndEncodedBuilder builds a RunEndEncoded array incrementally. The
+// builder's logical length is accumulated via Append/ContinueRun and a run
+// end is emitted into runEnds each time a run is closed (see finishRun).
+type RunEndEncodedBuilder struct {
+	builder
+
+	dt        arrow.DataType
+	runEnds   Builder // builds the run-end child (int16/int32/int64)
+	values    Builder // builds the per-run value child
+	maxRunEnd uint64  // largest value representable by the run-end type
+
+	// currently, mixing AppendValueFromString & UnmarshalOne is unsupported
+	lastUnmarshalled interface{}
+	unmarshalled     bool // tracks if Unmarshal was called (in case lastUnmarshalled is nil)
+	lastStr          *string
+}
+
+// NewRunEndEncodedBuilder returns a builder for a run-end-encoded array with
+// the given run-ends integer type and encoded value type. Panics if runEnds
+// is not int16, int32, or int64.
+func NewRunEndEncodedBuilder(mem memory.Allocator, runEnds, encoded arrow.DataType) *RunEndEncodedBuilder {
+	dt := arrow.RunEndEncodedOf(runEnds, encoded)
+	if !dt.ValidRunEndsType(runEnds) {
+		panic("arrow/ree: invalid runEnds type for run length encoded array")
+	}
+
+	// cap the logical length at what the run-end type can represent
+	var maxEnd uint64
+	switch runEnds.ID() {
+	case arrow.INT16:
+		maxEnd = math.MaxInt16
+	case arrow.INT32:
+		maxEnd = math.MaxInt32
+	case arrow.INT64:
+		maxEnd = math.MaxInt64
+	}
+	return &RunEndEncodedBuilder{
+		builder:          builder{refCount: 1, mem: mem},
+		dt:               dt,
+		runEnds:          NewBuilder(mem, runEnds),
+		values:           NewBuilder(mem, encoded),
+		maxRunEnd:        maxEnd,
+		lastUnmarshalled: nil,
+	}
+}
+
+func (b *RunEndEncodedBuilder) Type() arrow.DataType {
+ return b.dt
+}
+
+func (b *RunEndEncodedBuilder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ b.values.Release()
+ b.runEnds.Release()
+ }
+}
+
+// addLength extends the builder's logical length by n, panicking (wrapped
+// arrow.ErrInvalid) if the total would exceed what the run-end integer type
+// can represent (maxRunEnd).
+func (b *RunEndEncodedBuilder) addLength(n uint64) {
+	if uint64(b.length)+n > b.maxRunEnd {
+		// note: message fixed from upstream's garbled "must fit be less than"
+		panic(fmt.Errorf("%w: %s array length must be less than %d", arrow.ErrInvalid, b.dt, b.maxRunEnd))
+	}
+
+	b.length += int(n)
+}
+
+// finishRun closes the current run (if any) by appending the builder's
+// current logical length to the run-ends child, and clears the
+// last-value tracking used by AppendValueFromString/UnmarshalOne.
+// A zero-length builder appends nothing.
+func (b *RunEndEncodedBuilder) finishRun() {
+	b.lastUnmarshalled = nil
+	b.lastStr = nil
+	b.unmarshalled = false
+	if b.length == 0 {
+		return
+	}
+
+	// the run end is recorded in the concrete integer type of the child
+	switch bldr := b.runEnds.(type) {
+	case *Int16Builder:
+		bldr.Append(int16(b.length))
+	case *Int32Builder:
+		bldr.Append(int32(b.length))
+	case *Int64Builder:
+		bldr.Append(int64(b.length))
+	}
+}
+
+// ValueBuilder returns the builder for the per-run value child; callers
+// append one value per run started with Append.
+func (b *RunEndEncodedBuilder) ValueBuilder() Builder { return b.values }
+
+// Append closes the current run and starts a new run of length n; the
+// corresponding value must be appended via ValueBuilder.
+func (b *RunEndEncodedBuilder) Append(n uint64) {
+	b.finishRun()
+	b.addLength(n)
+}
+
+// AppendRuns starts one new run per entry in runs, each of the given length.
+func (b *RunEndEncodedBuilder) AppendRuns(runs []uint64) {
+	for _, r := range runs {
+		b.finishRun()
+		b.addLength(r)
+	}
+}
+
+// ContinueRun extends the current run by n without starting a new one.
+func (b *RunEndEncodedBuilder) ContinueRun(n uint64) {
+	b.addLength(n)
+}
+
+func (b *RunEndEncodedBuilder) AppendNull() {
+ b.finishRun()
+ b.values.AppendNull()
+ b.addLength(1)
+}
+
+func (b *RunEndEncodedBuilder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *RunEndEncodedBuilder) NullN() int {
+ return UnknownNullCount
+}
+
+func (b *RunEndEncodedBuilder) AppendEmptyValue() {
+ b.AppendNull()
+}
+
+func (b *RunEndEncodedBuilder) AppendEmptyValues(n int) {
+ b.AppendNulls(n)
+}
+
+func (b *RunEndEncodedBuilder) Reserve(n int) {
+ b.values.Reserve(n)
+ b.runEnds.Reserve(n)
+}
+
+func (b *RunEndEncodedBuilder) Resize(n int) {
+ b.values.Resize(n)
+ b.runEnds.Resize(n)
+}
+
+func (b *RunEndEncodedBuilder) NewRunEndEncodedArray() *RunEndEncoded {
+ data := b.newData()
+ defer data.Release()
+ return NewRunEndEncodedData(data)
+}
+
+func (b *RunEndEncodedBuilder) NewArray() arrow.Array {
+ return b.NewRunEndEncodedArray()
+}
+
+// newData closes the final run, materializes both child arrays, and wraps
+// them in a new Data of the run-end-encoded type, resetting the builder
+// for reuse. The caller is responsible for releasing the returned Data.
+func (b *RunEndEncodedBuilder) newData() (data *Data) {
+	b.finishRun()
+	values := b.values.NewArray()
+	defer values.Release()
+	runEnds := b.runEnds.NewArray()
+	defer runEnds.Release()
+
+	data = NewData(
+		b.dt, b.length, []*memory.Buffer{},
+		[]arrow.ArrayData{runEnds.Data(), values.Data()}, 0, 0)
+	b.reset()
+	return
+}
+
+// AppendValueFromString can't be used in conjunction with UnmarshalOne.
+// Consecutive identical strings are coalesced into a single run by
+// comparing against the previously appended string.
+func (b *RunEndEncodedBuilder) AppendValueFromString(s string) error {
+	// we don't support mixing AppendValueFromString & UnmarshalOne
+	if b.unmarshalled {
+		return fmt.Errorf("%w: mixing AppendValueFromString & UnmarshalOne not yet implemented", arrow.ErrNotImplemented)
+	}
+
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+
+	// same value as the previous append: just lengthen the current run
+	if b.lastStr != nil && s == *b.lastStr {
+		b.ContinueRun(1)
+		return nil
+	}
+
+	// new value: start a new run and remember the string for coalescing
+	b.Append(1)
+	lastStr := s
+	b.lastStr = &lastStr
+	return b.ValueBuilder().AppendValueFromString(s)
+}
+
+// UnmarshalOne can't be used in conjunction with AppendValueFromString.
+// It decodes a single JSON value, coalescing consecutive equal values
+// (compared via reflect.DeepEqual) into one run.
+func (b *RunEndEncodedBuilder) UnmarshalOne(dec *json.Decoder) error {
+	// we don't support mixing AppendValueFromString & UnmarshalOne
+	if b.lastStr != nil {
+		return fmt.Errorf("%w: mixing AppendValueFromString & UnmarshalOne not yet implemented", arrow.ErrNotImplemented)
+	}
+
+	var value interface{}
+	if err := dec.Decode(&value); err != nil {
+		return err
+	}
+
+	// if we unmarshalled the same value as the previous one, we want to
+	// continue the run. However, there's an edge case. At the start of
+	// unmarshalling, lastUnmarshalled will be nil, but we might get
+	// nil as the first value we unmarshal. In that case we want to
+	// make sure we add a new run instead. We can detect that case by
+	// checking that the number of runEnds matches the number of values
+	// we have, which means no matter what we have to start a new run
+	if reflect.DeepEqual(value, b.lastUnmarshalled) && (value != nil || b.runEnds.Len() != b.values.Len()) {
+		b.ContinueRun(1)
+		return nil
+	}
+
+	// re-encode the decoded value so the child builder can decode it with
+	// its own typed UnmarshalOne
+	data, err := json.Marshal(value)
+	if err != nil {
+		return err
+	}
+
+	b.Append(1)
+	b.lastUnmarshalled = value
+	b.unmarshalled = true
+	return b.ValueBuilder().UnmarshalOne(json.NewDecoder(bytes.NewReader(data)))
+}
+
+// Unmarshal can't be used in conjunction with AppendValueFromString (as it calls UnmarshalOne)
+func (b *RunEndEncodedBuilder) Unmarshal(dec *json.Decoder) error {
+ b.finishRun()
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UnmarshalJSON can't be used in conjunction with AppendValueFromString (as it calls UnmarshalOne)
+// UnmarshalJSON can't be used in conjunction with AppendValueFromString (as it calls UnmarshalOne).
+// It expects the data to be a JSON array of values.
+func (b *RunEndEncodedBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		// report the token actually found: the original printed delim, which
+		// is the zero json.Delim whenever the type assertion failed
+		return fmt.Errorf("list builder must unpack from json array, found %v", t)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+var (
+ _ arrow.Array = (*RunEndEncoded)(nil)
+ _ Builder = (*RunEndEncodedBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go
new file mode 100644
index 000000000..03e8c1734
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go
@@ -0,0 +1,244 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// ExtensionArray is the interface that needs to be implemented to handle
+// user-defined extension type arrays. In order to ensure consistency and
+// proper behavior, all ExtensionArray types must embed ExtensionArrayBase
+// in order to meet the interface which provides the default implementation
+// and handling for the array while allowing custom behavior to be built
+// on top of it.
+type ExtensionArray interface {
+ arrow.Array
+ // ExtensionType returns the datatype as per calling DataType(), but
+ // already cast to ExtensionType
+ ExtensionType() arrow.ExtensionType
+ // Storage returns the underlying storage array for this array.
+ Storage() arrow.Array
+ // by having a non-exported function in the interface, it means that
+ // consumers must embed ExtensionArrayBase in their structs in order
+ // to fulfill this interface.
+ mustEmbedExtensionArrayBase()
+}
+
+// two extension arrays are equal if their data types are equal and
+// their underlying storage arrays are equal.
+func arrayEqualExtension(l, r ExtensionArray) bool {
+ if !arrow.TypeEqual(l.DataType(), r.DataType()) {
+ return false
+ }
+
+ return Equal(l.Storage(), r.Storage())
+}
+
+// two extension arrays are approximately equal if their data types are
+// equal and their underlying storage arrays are approximately equal.
+func arrayApproxEqualExtension(l, r ExtensionArray, opt equalOption) bool {
+ if !arrow.TypeEqual(l.DataType(), r.DataType()) {
+ return false
+ }
+
+ return arrayApproxEqual(l.Storage(), r.Storage(), opt)
+}
+
+// NewExtensionArrayWithStorage constructs a new ExtensionArray from the provided
+// ExtensionType and uses the provided storage interface as the underlying storage.
+// This will not release the storage array passed in so consumers should call Release
+// on it manually while the new Extension array will share references to the underlying
+// Data buffers.
+func NewExtensionArrayWithStorage(dt arrow.ExtensionType, storage arrow.Array) arrow.Array {
+ if !arrow.TypeEqual(dt.StorageType(), storage.DataType()) {
+ panic(fmt.Errorf("arrow/array: storage type %s for extension type %s, does not match expected type %s", storage.DataType(), dt.ExtensionName(), dt.StorageType()))
+ }
+
+ storageData := storage.Data().(*Data)
+ // create a new data instance with the ExtensionType as the datatype but referencing the
+ // same underlying buffers to share them with the storage array.
+ data := NewData(dt, storageData.length, storageData.buffers, storageData.childData, storageData.nulls, storageData.offset)
+ defer data.Release()
+ return NewExtensionData(data)
+}
+
+// NewExtensionData expects a data with a datatype of arrow.ExtensionType and
+// underlying data built for the storage array.
+func NewExtensionData(data arrow.ArrayData) ExtensionArray {
+ base := ExtensionArrayBase{}
+ base.refCount = 1
+ base.setData(data.(*Data))
+
+ // use the ExtensionType's ArrayType to construct the correctly typed object
+ // to use as the ExtensionArray interface. reflect.New returns a pointer to
+ // the newly created object.
+ arr := reflect.New(base.ExtensionType().ArrayType())
+ // set the embedded ExtensionArrayBase to the value we created above. We know
+ // that this field will exist because the interface requires embedding ExtensionArrayBase
+ // so we don't have to separately check, this will panic if called on an ArrayType
+ // that doesn't embed ExtensionArrayBase which is what we want.
+ arr.Elem().FieldByName("ExtensionArrayBase").Set(reflect.ValueOf(base))
+ return arr.Interface().(ExtensionArray)
+}
+
+// ExtensionArrayBase is the base struct for user-defined Extension Array types
+// and must be embedded in any user-defined extension arrays like so:
+//
+// type UserDefinedArray struct {
+// array.ExtensionArrayBase
+// }
+type ExtensionArrayBase struct {
+ array
+ storage arrow.Array
+}
+
+func (e *ExtensionArrayBase) String() string {
+ return fmt.Sprintf("(%s)%s", e.data.dtype, e.storage)
+}
+
+func (e *ExtensionArrayBase) GetOneForMarshal(i int) interface{} {
+ return e.storage.GetOneForMarshal(i)
+}
+
+func (e *ExtensionArrayBase) MarshalJSON() ([]byte, error) {
+ return json.Marshal(e.storage)
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (e *ExtensionArrayBase) Retain() {
+ e.array.Retain()
+ e.storage.Retain()
+}
+
+// Release decreases the reference count by 1.
+// Release may be called simultaneously from multiple goroutines.
+// When the reference count goes to zero, the memory is freed.
+func (e *ExtensionArrayBase) Release() {
+ e.array.Release()
+ e.storage.Release()
+}
+
+// Storage returns the underlying storage array
+func (e *ExtensionArrayBase) Storage() arrow.Array { return e.storage }
+
+// ExtensionType returns the same thing as DataType, just already casted
+// to an ExtensionType interface for convenience.
+func (e *ExtensionArrayBase) ExtensionType() arrow.ExtensionType {
+ return e.DataType().(arrow.ExtensionType)
+}
+
+// setData validates that data carries an arrow.ExtensionType and builds the
+// storage array as a zero-copy view over the same buffers, re-typed to the
+// extension's storage type. Panics if the type is not an extension type.
+func (e *ExtensionArrayBase) setData(data *Data) {
+	if data.DataType().ID() != arrow.EXTENSION {
+		panic("arrow/array: must use extension type to construct an extension array")
+	}
+	extType, ok := data.dtype.(arrow.ExtensionType)
+	if !ok {
+		panic("arrow/array: DataType for ExtensionArray must implement arrow.ExtensionType")
+	}
+
+	e.array.setData(data)
+	// our underlying storage needs to reference the same data buffers (no copying)
+	// but should have the storage type's datatype, so we create a Data for it.
+	storageData := NewData(extType.StorageType(), data.length, data.buffers, data.childData, data.nulls, data.offset)
+	// carry the dictionary through for dictionary-encoded storage types
+	storageData.SetDictionary(data.dictionary)
+	defer storageData.Release()
+	e.storage = MakeFromData(storageData)
+}
+
+// ValueStr returns the value at index i as a string.
+// This needs to be implemented by the extension array type.
+func (e *ExtensionArrayBase) ValueStr(i int) string {
+ panic("arrow/array: ValueStr wasn't implemented by this extension array type")
+}
+
+// no-op function that exists simply to force embedding this in any extension array types.
+func (ExtensionArrayBase) mustEmbedExtensionArrayBase() {}
+
+// ExtensionBuilder is a convenience builder so that NewBuilder and such will still work
+// with extension types properly. Depending on preference it may be cleaner or easier to just use
+// NewExtensionArrayWithStorage and pass a storage array.
+//
+// That said, this allows easily building an extension array by providing the extension
+// type and retrieving the storage builder.
+type ExtensionBuilder struct {
+ Builder
+ dt arrow.ExtensionType
+}
+
+// NewExtensionBuilder returns a builder using the provided memory allocator for the desired
+// extension type. It will internally construct a builder of the storage type for the extension
+// type and keep a copy of the extension type. The underlying type builder can then be retrieved
+// by calling `StorageBuilder` on this and then type asserting it to the desired builder type.
+//
+// After using the storage builder, calling NewArray or NewExtensionArray will construct
+// the appropriate extension array type and set the storage correctly, resetting the builder for
+// reuse.
+//
+// # Example
+//
+// Simple example assuming an extension type of a UUID defined as a FixedSizeBinary(16) was registered
+// using the type name "uuid":
+//
+// uuidType := arrow.GetExtensionType("uuid")
+// bldr := array.NewExtensionBuilder(memory.DefaultAllocator, uuidType)
+// defer bldr.Release()
+// uuidBldr := bldr.StorageBuilder().(*array.FixedSizeBinaryBuilder)
+// /* build up the fixed size binary array as usual via Append/AppendValues */
+// uuidArr := bldr.NewExtensionArray()
+// defer uuidArr.Release()
+//
+// Because the storage builder is embedded in the Extension builder it also means
+// that any of the functions available on the Builder interface can be called on
+// an instance of ExtensionBuilder and will respond appropriately as the storage
+// builder would for generically grabbing the Lenth, Cap, Nulls, reserving, etc.
+func NewExtensionBuilder(mem memory.Allocator, dt arrow.ExtensionType) *ExtensionBuilder {
+ return &ExtensionBuilder{Builder: NewBuilder(mem, dt.StorageType()), dt: dt}
+}
+
+func (b *ExtensionBuilder) Type() arrow.DataType { return b.dt }
+
+// StorageBuilder returns the builder for the underlying storage type.
+func (b *ExtensionBuilder) StorageBuilder() Builder { return b.Builder }
+
+// NewArray creates a new array from the memory buffers used by the builder
+// and resets the builder so it can be used to build a new array.
+func (b *ExtensionBuilder) NewArray() arrow.Array {
+	return b.NewExtensionArray()
+}
+
+// NewExtensionArray creates an Extension array from the memory buffers used
+// by the builder and resets the ExtensionBuilder so it can be used to build
+// a new ExtensionArray of the same type.
+func (b *ExtensionBuilder) NewExtensionArray() ExtensionArray {
+	storage := b.Builder.NewArray()
+	defer storage.Release()
+
+	// re-type the storage data in place to the extension type before wrapping
+	storage.Data().(*Data).dtype = b.dt
+	return NewExtensionData(storage.Data())
+}
+
+var (
+ _ arrow.Array = (ExtensionArray)(nil)
+ _ Builder = (*ExtensionBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go
new file mode 100644
index 000000000..a71287faf
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+// ExtensionBuilderWrapper is an interface that you need to implement in your custom extension type if you want to provide a customer builder as well.
+// See example in ./arrow/internal/testing/types/extension_types.go
+type ExtensionBuilderWrapper interface {
+ NewBuilder(bldr *ExtensionBuilder) Builder
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go
new file mode 100644
index 000000000..62c321386
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go
@@ -0,0 +1,372 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// FixedSizeList represents an immutable sequence of N array values.
type FixedSizeList struct {
	array
	n      int32       // number of child values per list slot
	values arrow.Array // flattened child values shared by all slots
}
+
+var _ ListLike = (*FixedSizeList)(nil)
+
+// NewFixedSizeListData returns a new List array value, from data.
+func NewFixedSizeListData(data arrow.ArrayData) *FixedSizeList {
+ a := &FixedSizeList{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
// ListValues returns the flattened child array holding the list elements.
func (a *FixedSizeList) ListValues() arrow.Array { return a.values }

// ValueStr returns the JSON representation of list slot i, or NullValueStr
// when the slot is null.
func (a *FixedSizeList) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}
	return string(a.GetOneForMarshal(i).(json.RawMessage))
}
+func (a *FixedSizeList) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ if !a.IsValid(i) {
+ o.WriteString(NullValueStr)
+ continue
+ }
+ sub := a.newListValue(i)
+ fmt.Fprintf(o, "%v", sub)
+ sub.Release()
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// newListValue returns a freshly allocated slice of the child array covering
// list slot i; the caller is responsible for releasing it.
func (a *FixedSizeList) newListValue(i int) arrow.Array {
	beg, end := a.ValueOffsets(i)
	return NewSlice(a.values, beg, end)
}

// setData initializes the array from data, caching the fixed list width and
// materializing the child values array.
func (a *FixedSizeList) setData(data *Data) {
	a.array.setData(data)
	a.n = a.DataType().(*arrow.FixedSizeListType).Len()
	a.values = MakeFromData(data.childData[0])
}
+
+func arrayEqualFixedSizeList(left, right *FixedSizeList) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ o := func() bool {
+ l := left.newListValue(i)
+ defer l.Release()
+ r := right.newListValue(i)
+ defer r.Release()
+ return Equal(l, r)
+ }()
+ if !o {
+ return false
+ }
+ }
+ return true
+}
+
+// Len returns the number of elements in the array.
+func (a *FixedSizeList) Len() int { return a.array.Len() }
+
+func (a *FixedSizeList) ValueOffsets(i int) (start, end int64) {
+ n := int64(a.n)
+ off := int64(a.array.data.offset)
+ start, end = (off+int64(i))*n, (off+int64(i+1))*n
+ return
+}
+
// Retain increases the reference count by 1 on both the list and its child
// values array.
func (a *FixedSizeList) Retain() {
	a.array.Retain()
	a.values.Retain()
}

// Release decreases the reference count by 1 on both the list and its child
// values array.
func (a *FixedSizeList) Release() {
	a.array.Release()
	a.values.Release()
}
+
+func (a *FixedSizeList) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ slice := a.newListValue(i)
+ defer slice.Release()
+ v, err := json.Marshal(slice)
+ if err != nil {
+ panic(err)
+ }
+
+ return json.RawMessage(v)
+}
+
+func (a *FixedSizeList) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+
+ buf.WriteByte('[')
+ for i := 0; i < a.Len(); i++ {
+ if i != 0 {
+ buf.WriteByte(',')
+ }
+ if a.IsNull(i) {
+ enc.Encode(nil)
+ continue
+ }
+
+ slice := a.newListValue(i)
+ if err := enc.Encode(slice); err != nil {
+ return nil, err
+ }
+ slice.Release()
+ }
+ buf.WriteByte(']')
+ return buf.Bytes(), nil
+}
+
// A FixedSizeListBuilder builds FixedSizeList arrays using Append/AppendNull
// and the child ValueBuilder.
type FixedSizeListBuilder struct {
	builder

	etype  arrow.DataType // data type of the list's elements.
	n      int32          // number of elements in the fixed-size list.
	values Builder        // value builder for the list's elements.
}

// NewFixedSizeListBuilder returns a builder, using the provided memory allocator.
// The created list builder will create a list whose elements will be of type etype.
func NewFixedSizeListBuilder(mem memory.Allocator, n int32, etype arrow.DataType) *FixedSizeListBuilder {
	return &FixedSizeListBuilder{
		builder: builder{refCount: 1, mem: mem},
		etype:   etype,
		n:       n,
		values:  NewBuilder(mem, etype),
	}
}

// Type returns the data type this builder produces.
func (b *FixedSizeListBuilder) Type() arrow.DataType { return arrow.FixedSizeListOf(b.n, b.etype) }
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *FixedSizeListBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.values != nil {
			b.values.Release()
			b.values = nil
		}
	}
}
+
// Append marks the next list slot valid (v=true) or null (v=false). When
// appending a valid slot the caller must then append exactly n values via
// ValueBuilder.
func (b *FixedSizeListBuilder) Append(v bool) {
	b.Reserve(1)
	b.unsafeAppendBoolToBitmap(v)
}

// AppendNull will append null values to the underlying values by itself
func (b *FixedSizeListBuilder) AppendNull() {
	b.Reserve(1)
	b.unsafeAppendBoolToBitmap(false)
	// require to append this due to value indexes
	for i := int32(0); i < b.n; i++ {
		b.values.AppendNull()
	}
}

// AppendNulls will append n null values to the underlying values by itself
func (b *FixedSizeListBuilder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}
+
// AppendEmptyValue appends a valid slot filled with n empty child values.
func (b *FixedSizeListBuilder) AppendEmptyValue() {
	b.Append(true)
	for i := int32(0); i < b.n; i++ {
		b.values.AppendEmptyValue()
	}
}

// AppendEmptyValues appends n valid slots, each filled with empty child values.
func (b *FixedSizeListBuilder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// AppendValues appends validity flags only; the caller is responsible for
// appending the matching child values through ValueBuilder.
func (b *FixedSizeListBuilder) AppendValues(valid []bool) {
	b.Reserve(len(valid))
	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
}

// unsafeAppendBoolToBitmap records one validity bit without bounds checking;
// Reserve must have been called first.
func (b *FixedSizeListBuilder) unsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}
+
// init allocates the builder's null bitmap for capacity elements.
func (b *FixedSizeListBuilder) init(capacity int) {
	b.builder.init(capacity)
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *FixedSizeListBuilder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *FixedSizeListBuilder) Resize(n int) {
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(n, b.builder.init)
+ }
+}
+
+func (b *FixedSizeListBuilder) ValueBuilder() Builder {
+ return b.values
+}
+
+// NewArray creates a List array from the memory buffers used by the builder and resets the FixedSizeListBuilder
+// so it can be used to build a new array.
+func (b *FixedSizeListBuilder) NewArray() arrow.Array {
+ return b.NewListArray()
+}
+
+// NewListArray creates a List array from the memory buffers used by the builder and resets the FixedSizeListBuilder
+// so it can be used to build a new array.
+func (b *FixedSizeListBuilder) NewListArray() (a *FixedSizeList) {
+ data := b.newData()
+ a = NewFixedSizeListData(data)
+ data.Release()
+ return
+}
+
// newData finalizes the child values and packages the builder's buffers into
// a Data, then resets the builder for reuse. The caller owns the returned Data.
func (b *FixedSizeListBuilder) newData() (data *Data) {
	values := b.values.NewArray()
	defer values.Release()

	data = NewData(
		arrow.FixedSizeListOf(b.n, b.etype), b.length,
		[]*memory.Buffer{b.nullBitmap},
		[]arrow.ArrayData{values.Data()},
		b.nulls,
		0,
	)
	// reset only after NewData has retained the buffers
	b.reset()

	return
}
+
+func (b *FixedSizeListBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ dec := json.NewDecoder(strings.NewReader(s))
+ return b.UnmarshalOne(dec)
+}
+
// UnmarshalOne decodes a single JSON value from dec: a JSON array becomes one
// valid list slot (its elements fed to the child builder), JSON null becomes
// a null slot, and anything else is a type error.
func (b *FixedSizeListBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch t {
	case json.Delim('['):
		b.Append(true)
		if err := b.values.Unmarshal(dec); err != nil {
			return err
		}
		// consume ']'
		_, err := dec.Token()
		return err
	case nil:
		b.AppendNull()
	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Struct: arrow.FixedSizeListOf(b.n, b.etype).String(),
		}
	}

	return nil
}
+
+func (b *FixedSizeListBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *FixedSizeListBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("fixed size list builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+var (
+ _ arrow.Array = (*FixedSizeList)(nil)
+ _ Builder = (*FixedSizeListBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go
new file mode 100644
index 000000000..5466156d5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go
@@ -0,0 +1,123 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// A type which represents an immutable sequence of fixed-length binary strings.
type FixedSizeBinary struct {
	array

	valueBytes []byte // raw value buffer; bytewidth bytes per slot
	bytewidth  int32  // width in bytes of each value
}

// NewFixedSizeBinaryData constructs a new fixed-size binary array from data.
func NewFixedSizeBinaryData(data arrow.ArrayData) *FixedSizeBinary {
	// BitWidth is reported in bits; convert to the per-value byte width
	a := &FixedSizeBinary{bytewidth: int32(data.DataType().(arrow.FixedWidthDataType).BitWidth() / 8)}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}
+
+// Value returns the fixed-size slice at index i. This value should not be mutated.
+func (a *FixedSizeBinary) Value(i int) []byte {
+ i += a.array.data.offset
+ var (
+ bw = int(a.bytewidth)
+ beg = i * bw
+ end = (i + 1) * bw
+ )
+ return a.valueBytes[beg:end]
+}
+func (a *FixedSizeBinary) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return base64.StdEncoding.EncodeToString(a.Value(i))
+}
+
+func (a *FixedSizeBinary) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%q", a.Value(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// setData initializes the array from data, caching the raw value buffer
// (buffer 1) when present.
func (a *FixedSizeBinary) setData(data *Data) {
	a.array.setData(data)
	vals := data.buffers[1]
	if vals != nil {
		a.valueBytes = vals.Bytes()
	}

}
+
+func (a *FixedSizeBinary) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.Value(i)
+}
+
+func (a *FixedSizeBinary) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.Value(i)
+ } else {
+ vals[i] = nil
+ }
+ }
+ return json.Marshal(vals)
+}
+
+func arrayEqualFixedSizeBinary(left, right *FixedSizeBinary) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if !bytes.Equal(left.Value(i), right.Value(i)) {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ _ arrow.Array = (*FixedSizeBinary)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go
new file mode 100644
index 000000000..ba4b474a8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go
@@ -0,0 +1,261 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// A FixedSizeBinaryBuilder is used to build a FixedSizeBinary array using the Append methods.
type FixedSizeBinaryBuilder struct {
	builder

	dtype  *arrow.FixedSizeBinaryType // fixes ByteWidth for every appended value
	values *byteBufferBuilder         // contiguous value bytes
}

// NewFixedSizeBinaryBuilder returns a builder producing values of dtype using
// the provided allocator.
func NewFixedSizeBinaryBuilder(mem memory.Allocator, dtype *arrow.FixedSizeBinaryType) *FixedSizeBinaryBuilder {
	b := &FixedSizeBinaryBuilder{
		builder: builder{refCount: 1, mem: mem},
		dtype:   dtype,
		values:  newByteBufferBuilder(mem),
	}
	return b
}

// Type returns the data type this builder produces.
func (b *FixedSizeBinaryBuilder) Type() arrow.DataType { return b.dtype }
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (b *FixedSizeBinaryBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.values != nil {
			b.values.Release()
			b.values = nil
		}
	}
}
+
// Append adds v to the builder; v must be exactly ByteWidth bytes long or
// Append panics.
func (b *FixedSizeBinaryBuilder) Append(v []byte) {
	if len(v) != b.dtype.ByteWidth {
		// TODO(alexandre): should we return an error instead?
		panic("len(v) != b.dtype.ByteWidth")
	}

	b.Reserve(1)
	b.values.Append(v)
	b.UnsafeAppendBoolToBitmap(true)
}

// AppendNull appends a null slot, still advancing the value buffer by
// ByteWidth so value offsets stay aligned.
func (b *FixedSizeBinaryBuilder) AppendNull() {
	b.Reserve(1)
	b.values.Advance(b.dtype.ByteWidth)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls appends n null slots.
func (b *FixedSizeBinaryBuilder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue appends a valid slot of ByteWidth zero bytes.
func (b *FixedSizeBinaryBuilder) AppendEmptyValue() {
	b.Reserve(1)
	b.values.Advance(b.dtype.ByteWidth)
	b.UnsafeAppendBoolToBitmap(true)
}

// AppendEmptyValues appends n valid zero-filled slots.
func (b *FixedSizeBinaryBuilder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}
+
// UnsafeAppend appends v without checking capacity; the caller must have
// reserved space already.
func (b *FixedSizeBinaryBuilder) UnsafeAppend(v []byte) {
	b.values.unsafeAppend(v)
	b.UnsafeAppendBoolToBitmap(true)
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *FixedSizeBinaryBuilder) AppendValues(v [][]byte, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	for _, vv := range v {
		switch len(vv) {
		case 0:
			// empty entries advance the buffer but carry no bytes; validity
			// comes from the valid slice below
			b.values.Advance(b.dtype.ByteWidth)
		case b.dtype.ByteWidth:
			b.values.Append(vv)
		default:
			panic(fmt.Errorf("array: invalid binary length (got=%d, want=%d)", len(vv), b.dtype.ByteWidth))
		}
	}

	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}
+
// init allocates the null bitmap and pre-sizes the value buffer for capacity
// elements of ByteWidth bytes each.
func (b *FixedSizeBinaryBuilder) init(capacity int) {
	b.builder.init(capacity)
	b.values.resize(capacity * b.dtype.ByteWidth)
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *FixedSizeBinaryBuilder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
func (b *FixedSizeBinaryBuilder) Resize(n int) {
	b.builder.resize(n, b.init)
}
+
+// NewArray creates a FixedSizeBinary array from the memory buffers used by the
+// builder and resets the FixedSizeBinaryBuilder so it can be used to build a new array.
+func (b *FixedSizeBinaryBuilder) NewArray() arrow.Array {
+ return b.NewFixedSizeBinaryArray()
+}
+
+// NewFixedSizeBinaryArray creates a FixedSizeBinary array from the memory buffers used by the builder and resets the FixedSizeBinaryBuilder
+// so it can be used to build a new array.
+func (b *FixedSizeBinaryBuilder) NewFixedSizeBinaryArray() (a *FixedSizeBinary) {
+ data := b.newData()
+ a = NewFixedSizeBinaryData(data)
+ data.Release()
+ return
+}
+
// newData packages the builder's buffers into a Data and resets the builder
// for reuse. The caller owns the returned Data.
func (b *FixedSizeBinaryBuilder) newData() (data *Data) {
	values := b.values.Finish()
	data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, values}, nil, b.nulls, 0)

	// NewData retained the buffer; drop the builder's reference
	if values != nil {
		values.Release()
	}

	b.builder.reset()

	return
}
+
+func (b *FixedSizeBinaryBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+
+ data, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(data)
+ return nil
+}
+
// UnmarshalOne decodes a single JSON value from dec: a base64 string or byte
// slice of exactly ByteWidth bytes is appended, JSON null appends a null
// slot, and anything else (including wrong-length values) is a type error.
func (b *FixedSizeBinaryBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	var val []byte
	switch v := t.(type) {
	case string:
		data, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return err
		}
		val = data
	case []byte:
		val = v
	case nil:
		b.AppendNull()
		return nil
	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf([]byte{}),
			Offset: dec.InputOffset(),
			Struct: fmt.Sprintf("FixedSizeBinary[%d]", b.dtype.ByteWidth),
		}
	}

	if len(val) != b.dtype.ByteWidth {
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(val),
			Type:   reflect.TypeOf([]byte{}),
			Offset: dec.InputOffset(),
			Struct: fmt.Sprintf("FixedSizeBinary[%d]", b.dtype.ByteWidth),
		}
	}
	b.Append(val)
	return nil
}
+
+func (b *FixedSizeBinaryBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *FixedSizeBinaryBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("fixed size binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+var (
+ _ Builder = (*FixedSizeBinaryBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go
new file mode 100644
index 000000000..de499e267
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go
@@ -0,0 +1,113 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/float16"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// A type which represents an immutable sequence of Float16 values.
type Float16 struct {
	array
	values []float16.Num // view of buffer 1, windowed by offset/length
}

// NewFloat16Data constructs a new Float16 array from data.
func NewFloat16Data(data arrow.ArrayData) *Float16 {
	a := &Float16{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}
+
// Value returns the float16 value at index i.
func (a *Float16) Value(i int) float16.Num { return a.values[i] }

// ValueStr returns the string form of the value at index i, or NullValueStr
// when the slot is null.
func (a *Float16) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}
	return a.Value(i).String()
}

// Values returns the backing slice of values; it should not be mutated.
func (a *Float16) Values() []float16.Num { return a.values }
+
+func (a *Float16) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", a.values[i].Float32())
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// setData initializes the array from data, reinterpreting buffer 1 as
// float16 values and windowing it to [offset, offset+length).
func (a *Float16) setData(data *Data) {
	a.array.setData(data)
	vals := data.buffers[1]
	if vals != nil {
		a.values = arrow.Float16Traits.CastFromBytes(vals.Bytes())
		beg := a.array.data.offset
		end := beg + a.array.data.length
		a.values = a.values[beg:end]
	}
}
+
+func (a *Float16) GetOneForMarshal(i int) interface{} {
+ if a.IsValid(i) {
+ return a.values[i].Float32()
+ }
+ return nil
+}
+
+func (a *Float16) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i, v := range a.values {
+ if a.IsValid(i) {
+ vals[i] = v.Float32()
+ } else {
+ vals[i] = nil
+ }
+ }
+ return json.Marshal(vals)
+}
+
+func arrayEqualFloat16(left, right *Float16) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+var (
+ _ arrow.Array = (*Float16)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go
new file mode 100644
index 000000000..f96ab6037
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go
@@ -0,0 +1,263 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/float16"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// Float16Builder builds Float16 arrays using the Append methods.
type Float16Builder struct {
	builder

	data    *memory.Buffer // backing buffer for rawData
	rawData []float16.Num  // typed view of data
}

// NewFloat16Builder returns a builder using the provided allocator.
func NewFloat16Builder(mem memory.Allocator) *Float16Builder {
	return &Float16Builder{builder: builder{refCount: 1, mem: mem}}
}

// Type returns the data type this builder produces.
func (b *Float16Builder) Type() arrow.DataType { return arrow.FixedWidthTypes.Float16 }
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Float16Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}
+
// Append adds v to the builder, growing capacity as needed.
func (b *Float16Builder) Append(v float16.Num) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// UnsafeAppend adds v without checking capacity; Reserve must have been
// called first.
func (b *Float16Builder) UnsafeAppend(v float16.Num) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// AppendNull appends a null slot.
func (b *Float16Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls appends n null slots.
func (b *Float16Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue appends a valid zero value.
func (b *Float16Builder) AppendEmptyValue() {
	b.Reserve(1)
	b.UnsafeAppend(float16.Num{})
}

// AppendEmptyValues appends n valid zero values.
func (b *Float16Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppendBoolToBitmap records one validity bit without bounds checking;
// Reserve must have been called first.
func (b *Float16Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Float16Builder) AppendValues(v []float16.Num, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ if len(v) > 0 {
+ arrow.Float16Traits.Copy(b.rawData[b.length:], v)
+ }
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Float16Builder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.Uint16Traits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.Float16Traits.CastFromBytes(b.data.Bytes())
+}
+
// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Float16Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
func (b *Float16Builder) Resize(n int) {
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		// bitmap is resized to the requested n; the value buffer uses the
		// possibly-clamped minimum capacity
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Float16Traits.BytesRequired(n))
		b.rawData = arrow.Float16Traits.CastFromBytes(b.data.Bytes())
	}
}
+
+// NewArray creates a Float16 array from the memory buffers used by the builder and resets the Float16Builder
+// so it can be used to build a new array.
+func (b *Float16Builder) NewArray() arrow.Array {
+ return b.NewFloat16Array()
+}
+
+// NewFloat16Array creates a Float16 array from the memory buffers used by the builder and resets the Float16Builder
+// so it can be used to build a new array.
+func (b *Float16Builder) NewFloat16Array() (a *Float16) {
+ data := b.newData()
+ a = NewFloat16Data(data)
+ data.Release()
+ return
+}
+
// newData trims the value buffer to the built length, packages the builder's
// buffers into a Data and resets the builder for reuse. The caller owns the
// returned Data.
func (b *Float16Builder) newData() (data *Data) {
	bytesRequired := arrow.Float16Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.FixedWidthTypes.Float16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	// NewData retained the buffer; drop the builder's reference
	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString parses s as a float and appends it narrowed to
// float16; the null sentinel appends a null slot. On a parse failure a null
// is appended and the parse error returned.
func (b *Float16Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseFloat(s, 32)
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(float16.New(float32(v)))
	return nil
}
+
// UnmarshalOne decodes a single JSON value from dec: numbers and numeric
// strings are narrowed to float16, JSON null appends a null slot, and
// anything else is a type error.
func (b *Float16Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case float64:
		b.Append(float16.New(float32(v)))
	case string:
		f, err := strconv.ParseFloat(v, 32)
		if err != nil {
			return err
		}
		// this will currently silently truncate if it is too large
		b.Append(float16.New(float32(f)))
	case json.Number:
		f, err := v.Float64()
		if err != nil {
			return err
		}
		b.Append(float16.New(float32(f)))
	case nil:
		b.AppendNull()
	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(float16.Num{}),
			Offset: dec.InputOffset(),
		}
	}
	return nil
}
+
+func (b *Float16Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UnmarshalJSON will add values to this builder from unmarshalling the
+// array of values. Currently values that are larger than a float16 will
+// be silently truncated.
+func (b *Float16Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("float16 builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go
new file mode 100644
index 000000000..ff059c92c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go
@@ -0,0 +1,953 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// NewIntervalData constructs the concrete interval array (MonthInterval,
+// DayTimeInterval or MonthDayNanoInterval) matching the data type of the
+// supplied ArrayData; it panics on any other data type.
+func NewIntervalData(data arrow.ArrayData) arrow.Array {
+	switch data.DataType().(type) {
+	case *arrow.MonthIntervalType:
+		return NewMonthIntervalData(data.(*Data))
+	case *arrow.DayTimeIntervalType:
+		return NewDayTimeIntervalData(data.(*Data))
+	case *arrow.MonthDayNanoIntervalType:
+		return NewMonthDayNanoIntervalData(data.(*Data))
+	default:
+		panic(fmt.Errorf("arrow/array: unknown interval data type %T", data.DataType()))
+	}
+}
+
+// A type which represents an immutable sequence of arrow.MonthInterval values.
+type MonthInterval struct {
+	array
+	values []arrow.MonthInterval // typed view over the array's value buffer
+}
+
+// NewMonthIntervalData constructs a MonthInterval array from data.
+func NewMonthIntervalData(data arrow.ArrayData) *MonthInterval {
+	a := &MonthInterval{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// Value returns the value at index i; validity must be checked via IsNull.
+func (a *MonthInterval) Value(i int) arrow.MonthInterval { return a.values[i] }
+
+// ValueStr returns the string form of the value at index i, or
+// NullValueStr for nulls.
+func (a *MonthInterval) ValueStr(i int) string {
+	if a.IsNull(i) {
+		return NullValueStr
+	}
+	return fmt.Sprintf("%v", a.Value(i))
+}
+
+// MonthIntervalValues returns the slice of all values (including slots
+// that are null; consult the validity bitmap).
+func (a *MonthInterval) MonthIntervalValues() []arrow.MonthInterval { return a.values }
+
+// String returns a human-readable, bracketed rendering of the array.
+func (a *MonthInterval) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i, v := range a.values {
+		if i > 0 {
+			fmt.Fprintf(o, " ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString(NullValueStr)
+		default:
+			fmt.Fprintf(o, "%v", v)
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+// setData wires the shared array state and slices the value buffer to the
+// array's offset/length window.
+func (a *MonthInterval) setData(data *Data) {
+	a.array.setData(data)
+	vals := data.buffers[1]
+	if vals != nil {
+		a.values = arrow.MonthIntervalTraits.CastFromBytes(vals.Bytes())
+		beg := a.array.data.offset
+		end := beg + a.array.data.length
+		a.values = a.values[beg:end]
+	}
+}
+
+// GetOneForMarshal returns the value at i for JSON marshalling, or nil
+// when the slot is null.
+func (a *MonthInterval) GetOneForMarshal(i int) interface{} {
+	if a.IsValid(i) {
+		return a.values[i]
+	}
+	return nil
+}
+
+// MarshalJSON will create a json array out of a MonthInterval array,
+// each value will be an object of the form {"months": #} where
+// # is the numeric value of that index
+func (a *MonthInterval) MarshalJSON() ([]byte, error) {
+	// fast path: no nulls means the values slice can be marshalled directly
+	if a.NullN() == 0 {
+		return json.Marshal(a.values)
+	}
+	vals := make([]interface{}, a.Len())
+	for i := 0; i < a.Len(); i++ {
+		if a.IsValid(i) {
+			vals[i] = a.values[i]
+		} else {
+			vals[i] = nil
+		}
+	}
+
+	return json.Marshal(vals)
+}
+
+// arrayEqualMonthInterval reports element-wise equality; callers are
+// expected to have already verified matching lengths and null bitmaps.
+func arrayEqualMonthInterval(left, right *MonthInterval) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if left.Value(i) != right.Value(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// MonthIntervalBuilder incrementally builds MonthInterval arrays.
+type MonthIntervalBuilder struct {
+	builder
+
+	data *memory.Buffer // raw value storage
+	rawData []arrow.MonthInterval // typed view over data
+}
+
+// NewMonthIntervalBuilder returns a builder allocating from mem.
+func NewMonthIntervalBuilder(mem memory.Allocator) *MonthIntervalBuilder {
+	return &MonthIntervalBuilder{builder: builder{refCount: 1, mem: mem}}
+}
+
+// Type returns the data type produced by this builder.
+func (b *MonthIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.MonthInterval }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *MonthIntervalBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+// Append appends v as a valid value.
+func (b *MonthIntervalBuilder) Append(v arrow.MonthInterval) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+// AppendNull appends a null slot.
+func (b *MonthIntervalBuilder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+// AppendNulls appends n null slots.
+func (b *MonthIntervalBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+// AppendEmptyValue appends the zero value (0 months) as a valid entry.
+func (b *MonthIntervalBuilder) AppendEmptyValue() {
+	b.Append(arrow.MonthInterval(0))
+}
+
+// AppendEmptyValues appends n zero values.
+func (b *MonthIntervalBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend appends v without a capacity check; callers must have
+// called Reserve beforehand.
+func (b *MonthIntervalBuilder) UnsafeAppend(v arrow.MonthInterval) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records validity for the next slot without a
+// capacity check; the corresponding value slot is left untouched.
+func (b *MonthIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *MonthIntervalBuilder) AppendValues(v []arrow.MonthInterval, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.MonthIntervalTraits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+// init allocates the value buffer for capacity elements and refreshes the
+// typed view.
+func (b *MonthIntervalBuilder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.MonthIntervalTraits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.MonthIntervalTraits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *MonthIntervalBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *MonthIntervalBuilder) Resize(n int) {
+	nBuilder := n
+	// never shrink below the minimum builder capacity
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.MonthIntervalTraits.BytesRequired(n))
+		b.rawData = arrow.MonthIntervalTraits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a MonthInterval array from the memory buffers used by the builder and resets the MonthIntervalBuilder
+// so it can be used to build a new array.
+func (b *MonthIntervalBuilder) NewArray() arrow.Array {
+	return b.NewMonthIntervalArray()
+}
+
+// NewMonthIntervalArray creates a MonthInterval array from the memory buffers used by the builder and resets the MonthIntervalBuilder
+// so it can be used to build a new array.
+func (b *MonthIntervalBuilder) NewMonthIntervalArray() (a *MonthInterval) {
+	data := b.newData()
+	a = NewMonthIntervalData(data)
+	data.Release()
+	return
+}
+
+// newData snapshots the builder's buffers into a *Data and resets the
+// builder. The trailing release drops the builder's own reference to the
+// value buffer; the returned Data holds its own reference.
+func (b *MonthIntervalBuilder) newData() (data *Data) {
+	bytesRequired := arrow.MonthIntervalTraits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.FixedWidthTypes.MonthInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString appends the base-10 int32 parsed from s, or a null
+// when s equals NullValueStr. On parse failure a null is appended and the
+// error is returned.
+func (b *MonthIntervalBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	v, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(arrow.MonthInterval(v))
+	return nil
+}
+
+// UnmarshalOne decodes one JSON value via arrow.MonthInterval's
+// unmarshalling; a JSON null appends a null slot.
+func (b *MonthIntervalBuilder) UnmarshalOne(dec *json.Decoder) error {
+	var v *arrow.MonthInterval
+	if err := dec.Decode(&v); err != nil {
+		return err
+	}
+
+	if v == nil {
+		b.AppendNull()
+	} else {
+		b.Append(*v)
+	}
+	return nil
+}
+
+// Unmarshal appends values from dec until the enclosing JSON array is
+// exhausted; the caller must have consumed the opening '['.
+func (b *MonthIntervalBuilder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON will add the unmarshalled values of an array to the builder.
+// Each element is decoded through arrow.MonthInterval's JSON unmarshalling
+// (see UnmarshalOne); JSON nulls append nulls. NOTE(review): the original
+// comment described string values of the form "#months" — confirm the
+// accepted wire form against arrow.MonthInterval's UnmarshalJSON.
+func (b *MonthIntervalBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("month interval builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// A type which represents an immutable sequence of arrow.DayTimeInterval values.
+type DayTimeInterval struct {
+	array
+	values []arrow.DayTimeInterval // typed view over the array's value buffer
+}
+
+// NewDayTimeIntervalData constructs a DayTimeInterval array from data.
+func NewDayTimeIntervalData(data arrow.ArrayData) *DayTimeInterval {
+	a := &DayTimeInterval{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// Value returns the value at index i; validity must be checked via IsNull.
+func (a *DayTimeInterval) Value(i int) arrow.DayTimeInterval { return a.values[i] }
+
+// ValueStr returns the JSON object form of the value at index i, or
+// NullValueStr for nulls. It panics if marshalling fails.
+func (a *DayTimeInterval) ValueStr(i int) string {
+	if a.IsNull(i) {
+		return NullValueStr
+	}
+	data, err := json.Marshal(a.GetOneForMarshal(i))
+	if err != nil {
+		panic(err)
+	}
+	return string(data)
+}
+
+// DayTimeIntervalValues returns the slice of all values (including slots
+// that are null; consult the validity bitmap).
+func (a *DayTimeInterval) DayTimeIntervalValues() []arrow.DayTimeInterval { return a.values }
+
+// String returns a human-readable, bracketed rendering of the array.
+func (a *DayTimeInterval) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i, v := range a.values {
+		if i > 0 {
+			fmt.Fprintf(o, " ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString(NullValueStr)
+		default:
+			fmt.Fprintf(o, "%v", v)
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+// setData wires the shared array state and slices the value buffer to the
+// array's offset/length window.
+func (a *DayTimeInterval) setData(data *Data) {
+	a.array.setData(data)
+	vals := data.buffers[1]
+	if vals != nil {
+		a.values = arrow.DayTimeIntervalTraits.CastFromBytes(vals.Bytes())
+		beg := a.array.data.offset
+		end := beg + a.array.data.length
+		a.values = a.values[beg:end]
+	}
+}
+
+// GetOneForMarshal returns the value at i for JSON marshalling, or nil
+// when the slot is null.
+func (a *DayTimeInterval) GetOneForMarshal(i int) interface{} {
+	if a.IsValid(i) {
+		return a.values[i]
+	}
+	return nil
+}
+
+// MarshalJSON will marshal this array to JSON as an array of objects,
+// consisting of the form {"days": #, "milliseconds": #} for each element.
+func (a *DayTimeInterval) MarshalJSON() ([]byte, error) {
+	// fast path: no nulls means the values slice can be marshalled directly
+	if a.NullN() == 0 {
+		return json.Marshal(a.values)
+	}
+	vals := make([]interface{}, a.Len())
+	for i, v := range a.values {
+		if a.IsValid(i) {
+			vals[i] = v
+		} else {
+			vals[i] = nil
+		}
+	}
+	return json.Marshal(vals)
+}
+
+// arrayEqualDayTimeInterval reports element-wise equality; callers are
+// expected to have already verified matching lengths and null bitmaps.
+func arrayEqualDayTimeInterval(left, right *DayTimeInterval) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if left.Value(i) != right.Value(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// DayTimeIntervalBuilder incrementally builds DayTimeInterval arrays.
+type DayTimeIntervalBuilder struct {
+	builder
+
+	data *memory.Buffer // raw value storage
+	rawData []arrow.DayTimeInterval // typed view over data
+}
+
+// NewDayTimeIntervalBuilder returns a builder allocating from mem.
+func NewDayTimeIntervalBuilder(mem memory.Allocator) *DayTimeIntervalBuilder {
+	return &DayTimeIntervalBuilder{builder: builder{refCount: 1, mem: mem}}
+}
+
+// Type returns the data type produced by this builder.
+func (b *DayTimeIntervalBuilder) Type() arrow.DataType { return arrow.FixedWidthTypes.DayTimeInterval }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *DayTimeIntervalBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+// Append appends v as a valid value.
+func (b *DayTimeIntervalBuilder) Append(v arrow.DayTimeInterval) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+// AppendNull appends a null slot.
+func (b *DayTimeIntervalBuilder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+// AppendNulls appends n null slots.
+func (b *DayTimeIntervalBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+// AppendEmptyValue appends the zero-valued interval as a valid entry.
+func (b *DayTimeIntervalBuilder) AppendEmptyValue() {
+	b.Append(arrow.DayTimeInterval{})
+}
+
+// AppendEmptyValues appends n zero-valued intervals.
+func (b *DayTimeIntervalBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend appends v without a capacity check; callers must have
+// called Reserve beforehand.
+func (b *DayTimeIntervalBuilder) UnsafeAppend(v arrow.DayTimeInterval) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records validity for the next slot without a
+// capacity check; the corresponding value slot is left untouched.
+func (b *DayTimeIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *DayTimeIntervalBuilder) AppendValues(v []arrow.DayTimeInterval, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.DayTimeIntervalTraits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+// init allocates the value buffer for capacity elements and refreshes the
+// typed view.
+func (b *DayTimeIntervalBuilder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.DayTimeIntervalTraits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.DayTimeIntervalTraits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *DayTimeIntervalBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *DayTimeIntervalBuilder) Resize(n int) {
+	nBuilder := n
+	// never shrink below the minimum builder capacity
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.DayTimeIntervalTraits.BytesRequired(n))
+		b.rawData = arrow.DayTimeIntervalTraits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a DayTimeInterval array from the memory buffers used by the builder and resets the DayTimeIntervalBuilder
+// so it can be used to build a new array.
+func (b *DayTimeIntervalBuilder) NewArray() arrow.Array {
+	return b.NewDayTimeIntervalArray()
+}
+
+// NewDayTimeIntervalArray creates a DayTimeInterval array from the memory buffers used by the builder and resets the DayTimeIntervalBuilder
+// so it can be used to build a new array.
+func (b *DayTimeIntervalBuilder) NewDayTimeIntervalArray() (a *DayTimeInterval) {
+	data := b.newData()
+	a = NewDayTimeIntervalData(data)
+	data.Release()
+	return
+}
+
+// newData snapshots the builder's buffers into a *Data and resets the
+// builder. The trailing release drops the builder's own reference to the
+// value buffer; the returned Data holds its own reference.
+func (b *DayTimeIntervalBuilder) newData() (data *Data) {
+	bytesRequired := arrow.DayTimeIntervalTraits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.FixedWidthTypes.DayTimeInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString appends the interval decoded from the JSON object
+// in s, or a null when s equals NullValueStr. On decode failure a null is
+// appended and the error is returned.
+func (b *DayTimeIntervalBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	var v arrow.DayTimeInterval
+	if err := json.Unmarshal([]byte(s), &v); err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(v)
+	return nil
+}
+
+// UnmarshalOne decodes one JSON value via arrow.DayTimeInterval's
+// unmarshalling; a JSON null appends a null slot.
+func (b *DayTimeIntervalBuilder) UnmarshalOne(dec *json.Decoder) error {
+	var v *arrow.DayTimeInterval
+	if err := dec.Decode(&v); err != nil {
+		return err
+	}
+
+	if v == nil {
+		b.AppendNull()
+	} else {
+		b.Append(*v)
+	}
+	return nil
+}
+
+// Unmarshal appends values from dec until the enclosing JSON array is
+// exhausted; the caller must have consumed the opening '['.
+func (b *DayTimeIntervalBuilder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON will add the values unmarshalled from an array to the builder,
+// with the values expected to be objects of the form {"days": #, "milliseconds": #}
+func (b *DayTimeIntervalBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("day_time interval builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// A type which represents an immutable sequence of arrow.MonthDayNanoInterval values.
+type MonthDayNanoInterval struct {
+	array
+	values []arrow.MonthDayNanoInterval // typed view over the array's value buffer
+}
+
+// NewMonthDayNanoIntervalData constructs a MonthDayNanoInterval array from data.
+func NewMonthDayNanoIntervalData(data arrow.ArrayData) *MonthDayNanoInterval {
+	a := &MonthDayNanoInterval{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// Value returns the value at index i; validity must be checked via IsNull.
+func (a *MonthDayNanoInterval) Value(i int) arrow.MonthDayNanoInterval { return a.values[i] }
+
+// ValueStr returns the JSON object form of the value at index i, or
+// NullValueStr for nulls. It panics if marshalling fails.
+func (a *MonthDayNanoInterval) ValueStr(i int) string {
+	if a.IsNull(i) {
+		return NullValueStr
+	}
+	data, err := json.Marshal(a.GetOneForMarshal(i))
+	if err != nil {
+		panic(err)
+	}
+	return string(data)
+}
+
+// MonthDayNanoIntervalValues returns the slice of all values (including
+// slots that are null; consult the validity bitmap).
+func (a *MonthDayNanoInterval) MonthDayNanoIntervalValues() []arrow.MonthDayNanoInterval {
+	return a.values
+}
+
+// String returns a human-readable, bracketed rendering of the array.
+func (a *MonthDayNanoInterval) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i, v := range a.values {
+		if i > 0 {
+			fmt.Fprintf(o, " ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString(NullValueStr)
+		default:
+			fmt.Fprintf(o, "%v", v)
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+// setData wires the shared array state and slices the value buffer to the
+// array's offset/length window.
+func (a *MonthDayNanoInterval) setData(data *Data) {
+	a.array.setData(data)
+	vals := data.buffers[1]
+	if vals != nil {
+		a.values = arrow.MonthDayNanoIntervalTraits.CastFromBytes(vals.Bytes())
+		beg := a.array.data.offset
+		end := beg + a.array.data.length
+		a.values = a.values[beg:end]
+	}
+}
+
+// GetOneForMarshal returns the value at i for JSON marshalling, or nil
+// when the slot is null.
+func (a *MonthDayNanoInterval) GetOneForMarshal(i int) interface{} {
+	if a.IsValid(i) {
+		return a.values[i]
+	}
+	return nil
+}
+
+// MarshalJSON will marshal this array to a JSON array with elements
+// marshalled to the form {"months": #, "days": #, "nanoseconds": #}
+func (a *MonthDayNanoInterval) MarshalJSON() ([]byte, error) {
+	// fast path: no nulls means the values slice can be marshalled directly
+	if a.NullN() == 0 {
+		return json.Marshal(a.values)
+	}
+	vals := make([]interface{}, a.Len())
+	for i, v := range a.values {
+		if a.IsValid(i) {
+			vals[i] = v
+		} else {
+			vals[i] = nil
+		}
+	}
+	return json.Marshal(vals)
+}
+
+// arrayEqualMonthDayNanoInterval reports element-wise equality; callers
+// are expected to have already verified matching lengths and null bitmaps.
+func arrayEqualMonthDayNanoInterval(left, right *MonthDayNanoInterval) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if left.Value(i) != right.Value(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// MonthDayNanoIntervalBuilder incrementally builds MonthDayNanoInterval arrays.
+type MonthDayNanoIntervalBuilder struct {
+	builder
+
+	data *memory.Buffer // raw value storage
+	rawData []arrow.MonthDayNanoInterval // typed view over data
+}
+
+// NewMonthDayNanoIntervalBuilder returns a builder allocating from mem.
+func NewMonthDayNanoIntervalBuilder(mem memory.Allocator) *MonthDayNanoIntervalBuilder {
+	return &MonthDayNanoIntervalBuilder{builder: builder{refCount: 1, mem: mem}}
+}
+
+// Type returns the data type produced by this builder.
+func (b *MonthDayNanoIntervalBuilder) Type() arrow.DataType {
+	return arrow.FixedWidthTypes.MonthDayNanoInterval
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *MonthDayNanoIntervalBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+// Append appends v as a valid value.
+func (b *MonthDayNanoIntervalBuilder) Append(v arrow.MonthDayNanoInterval) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+// AppendNull appends a null slot.
+func (b *MonthDayNanoIntervalBuilder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+// AppendNulls appends n null slots.
+func (b *MonthDayNanoIntervalBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+// AppendEmptyValue appends the zero-valued interval as a valid entry.
+func (b *MonthDayNanoIntervalBuilder) AppendEmptyValue() {
+	b.Append(arrow.MonthDayNanoInterval{})
+}
+
+// AppendEmptyValues appends n zero-valued intervals.
+func (b *MonthDayNanoIntervalBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend appends v without a capacity check; callers must have
+// called Reserve beforehand.
+func (b *MonthDayNanoIntervalBuilder) UnsafeAppend(v arrow.MonthDayNanoInterval) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records validity for the next slot without a
+// capacity check; the corresponding value slot is left untouched.
+func (b *MonthDayNanoIntervalBuilder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *MonthDayNanoIntervalBuilder) AppendValues(v []arrow.MonthDayNanoInterval, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.MonthDayNanoIntervalTraits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+// init allocates the value buffer for capacity elements and refreshes the
+// typed view.
+func (b *MonthDayNanoIntervalBuilder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.MonthDayNanoIntervalTraits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.MonthDayNanoIntervalTraits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *MonthDayNanoIntervalBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *MonthDayNanoIntervalBuilder) Resize(n int) {
+	nBuilder := n
+	// never shrink below the minimum builder capacity
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.MonthDayNanoIntervalTraits.BytesRequired(n))
+		b.rawData = arrow.MonthDayNanoIntervalTraits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+// NewArray creates a MonthDayNanoInterval array from the memory buffers used by the builder and resets the MonthDayNanoIntervalBuilder
+// so it can be used to build a new array.
+func (b *MonthDayNanoIntervalBuilder) NewArray() arrow.Array {
+	return b.NewMonthDayNanoIntervalArray()
+}
+
+// NewMonthDayNanoIntervalArray creates a MonthDayNanoInterval array from the memory buffers used by the builder and resets the MonthDayNanoIntervalBuilder
+// so it can be used to build a new array.
+func (b *MonthDayNanoIntervalBuilder) NewMonthDayNanoIntervalArray() (a *MonthDayNanoInterval) {
+	data := b.newData()
+	a = NewMonthDayNanoIntervalData(data)
+	data.Release()
+	return
+}
+
+// newData snapshots the builder's buffers into a *Data and resets the
+// builder. The trailing release drops the builder's own reference to the
+// value buffer; the returned Data holds its own reference.
+func (b *MonthDayNanoIntervalBuilder) newData() (data *Data) {
+	bytesRequired := arrow.MonthDayNanoIntervalTraits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.FixedWidthTypes.MonthDayNanoInterval, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString appends the interval decoded from the JSON object
+// in s, or a null when s equals NullValueStr. On decode failure a null is
+// appended and the error is returned, matching the behavior of
+// DayTimeIntervalBuilder.AppendValueFromString and
+// Float16Builder.AppendValueFromString (previously this builder returned
+// the error without appending, leaving row counts out of step with
+// sibling builders that best-effort-append a null).
+func (b *MonthDayNanoIntervalBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	var v arrow.MonthDayNanoInterval
+	if err := json.Unmarshal([]byte(s), &v); err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(v)
+	return nil
+}
+
+// UnmarshalOne decodes one JSON value via arrow.MonthDayNanoInterval's
+// unmarshalling; a JSON null appends a null slot.
+func (b *MonthDayNanoIntervalBuilder) UnmarshalOne(dec *json.Decoder) error {
+	var v *arrow.MonthDayNanoInterval
+	if err := dec.Decode(&v); err != nil {
+		return err
+	}
+
+	if v == nil {
+		b.AppendNull()
+	} else {
+		b.Append(*v)
+	}
+	return nil
+}
+
+// Unmarshal appends values from dec until the enclosing JSON array is
+// exhausted; the caller must have consumed the opening '['.
+func (b *MonthDayNanoIntervalBuilder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON unmarshals a JSON array of objects and adds them to this builder,
+// each element of the array is expected to be an object of the form
+// {"months": #, "days": #, "nanoseconds": #}
+func (b *MonthDayNanoIntervalBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("month_day_nano interval builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// compile-time interface conformance checks
+var (
+	_ arrow.Array = (*MonthInterval)(nil)
+	_ arrow.Array = (*DayTimeInterval)(nil)
+	_ arrow.Array = (*MonthDayNanoInterval)(nil)
+
+	_ Builder = (*MonthIntervalBuilder)(nil)
+	_ Builder = (*DayTimeIntervalBuilder)(nil)
+	_ Builder = (*MonthDayNanoIntervalBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go
new file mode 100644
index 000000000..e09717c41
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go
@@ -0,0 +1,205 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Option configures a JSONReader; config is an opaque target so options
+// can be shared across reader types.
+type Option func(config)
+type config interface{}
+
+// WithChunk sets the chunk size for reading in json records. The default is to
+// read in one row per record batch as a single object. If chunk size is set to
+// a negative value, then the entire file is read as a single record batch.
+// Otherwise a record batch is read in with chunk size rows per record batch until
+// it reaches EOF.
+func WithChunk(n int) Option {
+	return func(cfg config) {
+		switch cfg := cfg.(type) {
+		case *JSONReader:
+			cfg.chunk = n
+		default:
+			// applying a JSON option to a non-JSON config is a programmer error
+			panic(fmt.Errorf("arrow/json): unknown config type %T", cfg))
+		}
+	}
+}
+
+// WithAllocator specifies the allocator to use for creating the record batches,
+// if it is not called, then memory.DefaultAllocator will be used.
+func WithAllocator(mem memory.Allocator) Option {
+	return func(cfg config) {
+		switch cfg := cfg.(type) {
+		case *JSONReader:
+			cfg.mem = mem
+		default:
+			// applying a JSON option to a non-JSON config is a programmer error
+			panic(fmt.Errorf("arrow/json): unknown config type %T", cfg))
+		}
+	}
+}
+
+// JSONReader is a json reader that meets the RecordReader interface definition.
+//
+// To read in an array of objects as a record, you can use RecordFromJSON
+// which is equivalent to reading the json as a struct array whose fields are
+// the columns of the record. This primarily exists to fit the RecordReader
+// interface as a matching reader for the csv reader.
+type JSONReader struct {
+	r *json.Decoder
+	schema *arrow.Schema
+
+	bldr *RecordBuilder
+
+	refs int64 // reference count
+	cur arrow.Record // last record read; released on the next Next/Release
+	err error // last error encountered
+
+	chunk int // rows per record: <0 = whole input, 1 = one row, n>1 = n rows
+	done bool // set once EOF or a decode error is hit
+
+	mem memory.Allocator
+	next func() bool // chunking strategy selected in NewJSONReader
+}
+
+// NewJSONReader returns a json RecordReader which expects to find one json object
+// per row of dataset. Using WithChunk can control how many rows are processed
+// per record, which is how many objects become a single record from the file.
+//
+// If it is desired to write out an array of rows, then simply use RecordToStructArray
+// and json.Marshal the struct array for the same effect.
+func NewJSONReader(r io.Reader, schema *arrow.Schema, opts ...Option) *JSONReader {
+	rr := &JSONReader{
+		r: json.NewDecoder(r),
+		schema: schema,
+		refs: 1,
+		chunk: 1,
+	}
+	for _, o := range opts {
+		o(rr)
+	}
+
+	if rr.mem == nil {
+		rr.mem = memory.DefaultAllocator
+	}
+
+	rr.bldr = NewRecordBuilder(rr.mem, schema)
+	// pick the iteration strategy once, based on the configured chunk size
+	switch {
+	case rr.chunk < 0:
+		rr.next = rr.nextall
+	case rr.chunk > 1:
+		rr.next = rr.nextn
+	default:
+		rr.next = rr.next1
+	}
+	return rr
+}
+
+// Err returns the last encountered error
+func (r *JSONReader) Err() error { return r.err }
+
+// Schema returns the schema records are read with.
+func (r *JSONReader) Schema() *arrow.Schema { return r.schema }
+
+// Record returns the last read in record. The returned record is only valid
+// until the next call to Next unless Retain is called on the record itself.
+func (r *JSONReader) Record() arrow.Record { return r.cur }
+
+// Retain increases the reference count by 1.
+func (r *JSONReader) Retain() {
+	atomic.AddInt64(&r.refs, 1)
+}
+
+// Release decreases the reference count by 1; when it reaches zero the
+// current record (if any) and the record builder are released and the
+// decoder is dropped.
+func (r *JSONReader) Release() {
+	debug.Assert(atomic.LoadInt64(&r.refs) > 0, "too many releases")
+
+	if atomic.AddInt64(&r.refs, -1) == 0 {
+		if r.cur != nil {
+			r.cur.Release()
+			r.cur = nil
+		}
+		// Release the builder and drop the decoder unconditionally:
+		// previously these were nested inside the r.cur != nil check,
+		// leaking the RecordBuilder whenever Release was called before
+		// any record had been read (or after Next returned false).
+		r.bldr.Release()
+		r.r = nil
+	}
+}
+
+// Next returns true if it read in a record, which will be available via Record
+// and false if there is either an error or the end of the reader.
+func (r *JSONReader) Next() bool {
+	// drop the previous record before reading the next one
+	if r.cur != nil {
+		r.cur.Release()
+		r.cur = nil
+	}
+
+	if r.err != nil || r.done {
+		return false
+	}
+
+	return r.next()
+}
+
+// readNext decodes one JSON object into the record builder, reporting
+// false on EOF (which is not surfaced as an error) or decode failure.
+func (r *JSONReader) readNext() bool {
+	r.err = r.r.Decode(r.bldr)
+	if r.err != nil {
+		r.done = true
+		// EOF terminates iteration but is not an error for the caller
+		if errors.Is(r.err, io.EOF) {
+			r.err = nil
+		}
+		return false
+	}
+	return true
+}
+
+// nextall reads the entire remaining input into a single record.
+func (r *JSONReader) nextall() bool {
+	for r.readNext() {
+	}
+
+	r.cur = r.bldr.NewRecord()
+	return r.cur.NumRows() > 0
+}
+
+// next1 reads exactly one row per record (the default chunking).
+func (r *JSONReader) next1() bool {
+	if !r.readNext() {
+		return false
+	}
+
+	r.cur = r.bldr.NewRecord()
+	return true
+}
+
+// nextn reads up to r.chunk rows into one record; a short final record is
+// produced when the input ends mid-chunk.
+func (r *JSONReader) nextn() bool {
+	var n = 0
+
+	for i := 0; i < r.chunk && !r.done; i, n = i+1, n+1 {
+		if !r.readNext() {
+			break
+		}
+	}
+
+	if n > 0 {
+		r.cur = r.bldr.NewRecord()
+	}
+	return n > 0
+}
+
+// compile-time interface conformance check
+var (
+	_ RecordReader = (*JSONReader)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go
new file mode 100644
index 000000000..d8d8b8c76
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go
@@ -0,0 +1,1688 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// ListLike is the common interface implemented by the list-like array types
// in this package: a nested array exposing its flattened child values and
// per-element [start, end) offsets into that child.
type ListLike interface {
	arrow.Array
	ListValues() arrow.Array
	ValueOffsets(i int) (start, end int64)
}

// VarLenListLike is implemented by the variable-length list-like arrays.
type VarLenListLike interface {
	ListLike
}
+
// List represents an immutable sequence of array values.
type List struct {
	array
	values  arrow.Array // child array holding the flattened list elements
	offsets []int32     // element i spans values[offsets[i]:offsets[i+1]]
}

var _ ListLike = (*List)(nil)
+
+// NewListData returns a new List array value, from data.
+func NewListData(data arrow.ArrayData) *List {
+ a := &List{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
// ListValues returns the child array holding the flattened list elements.
func (a *List) ListValues() arrow.Array { return a.values }

// ValueStr returns the JSON string representation of the value at index i,
// or NullValueStr for null entries.
func (a *List) ValueStr(i int) string {
	if !a.IsValid(i) {
		return NullValueStr
	}
	return string(a.GetOneForMarshal(i).(json.RawMessage))
}
+
+func (a *List) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ if a.IsNull(i) {
+ o.WriteString(NullValueStr)
+ continue
+ }
+ sub := a.newListValue(i)
+ fmt.Fprintf(o, "%v", sub)
+ sub.Release()
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// newListValue returns the i-th list as a slice of the child values array.
// The caller is responsible for releasing the returned array.
func (a *List) newListValue(i int) arrow.Array {
	beg, end := a.ValueOffsets(i)
	return NewSlice(a.values, beg, end)
}

// setData initializes the array from data: buffer 1 holds the int32 offsets
// and child 0 holds the flattened values.
func (a *List) setData(data *Data) {
	debug.Assert(len(data.buffers) >= 2, "list data should have 2 buffers")
	a.array.setData(data)
	vals := data.buffers[1]
	if vals != nil {
		a.offsets = arrow.Int32Traits.CastFromBytes(vals.Bytes())
	}
	a.values = MakeFromData(data.childData[0])
}
+
// GetOneForMarshal returns the i-th list encoded as a json.RawMessage, or
// nil for a null entry. It panics if marshalling the slice fails.
func (a *List) GetOneForMarshal(i int) interface{} {
	if a.IsNull(i) {
		return nil
	}

	slice := a.newListValue(i)
	defer slice.Release()
	v, err := json.Marshal(slice)
	if err != nil {
		panic(err)
	}
	return json.RawMessage(v)
}

// MarshalJSON implements json.Marshaler, encoding the array as a JSON array
// with null entries for null lists.
func (a *List) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	buf.WriteByte('[')
	for i := 0; i < a.Len(); i++ {
		if i != 0 {
			buf.WriteByte(',')
		}
		if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
			return nil, err
		}
	}
	buf.WriteByte(']')
	return buf.Bytes(), nil
}
+
+func arrayEqualList(left, right *List) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ o := func() bool {
+ l := left.newListValue(i)
+ defer l.Release()
+ r := right.newListValue(i)
+ defer r.Release()
+ return Equal(l, r)
+ }()
+ if !o {
+ return false
+ }
+ }
+ return true
+}
+
// Len returns the number of elements in the array.
func (a *List) Len() int { return a.array.Len() }

// Offsets returns the raw int32 offsets buffer (not adjusted for the
// array's own offset).
func (a *List) Offsets() []int32 { return a.offsets }

// Retain increases the reference count of the array and its child by 1.
func (a *List) Retain() {
	a.array.Retain()
	a.values.Retain()
}

// Release decreases the reference count of the array and its child by 1.
func (a *List) Release() {
	a.array.Release()
	a.values.Release()
}

// ValueOffsets returns the [start, end) range in the child values array
// occupied by list i.
func (a *List) ValueOffsets(i int) (start, end int64) {
	debug.Assert(i >= 0 && i < a.array.data.length, "index out of range")
	j := i + a.array.data.offset
	start, end = int64(a.offsets[j]), int64(a.offsets[j+1])
	return
}
+
// LargeList represents an immutable sequence of array values. It mirrors
// List but uses 64-bit offsets.
type LargeList struct {
	array
	values  arrow.Array // child array holding the flattened list elements
	offsets []int64     // element i spans values[offsets[i]:offsets[i+1]]
}

var _ ListLike = (*LargeList)(nil)

// NewLargeListData returns a new LargeList array value, from data.
func NewLargeListData(data arrow.ArrayData) *LargeList {
	a := new(LargeList)
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}

// ListValues returns the child array holding the flattened list elements.
func (a *LargeList) ListValues() arrow.Array { return a.values }

// ValueStr returns the JSON string representation of the value at index i,
// or NullValueStr for null entries.
func (a *LargeList) ValueStr(i int) string {
	if !a.IsValid(i) {
		return NullValueStr
	}
	return string(a.GetOneForMarshal(i).(json.RawMessage))
}

// String returns a human-readable representation of the array.
func (a *LargeList) String() string {
	o := new(strings.Builder)
	o.WriteString("[")
	for i := 0; i < a.Len(); i++ {
		if i > 0 {
			o.WriteString(" ")
		}
		if a.IsNull(i) {
			o.WriteString(NullValueStr)
			continue
		}
		sub := a.newListValue(i)
		fmt.Fprintf(o, "%v", sub)
		sub.Release()
	}
	o.WriteString("]")
	return o.String()
}

// newListValue returns the i-th list as a slice of the child values array.
// The caller is responsible for releasing the returned array.
func (a *LargeList) newListValue(i int) arrow.Array {
	beg, end := a.ValueOffsets(i)
	return NewSlice(a.values, beg, end)
}

// setData initializes the array from data: buffer 1 holds the int64 offsets
// and child 0 holds the flattened values.
func (a *LargeList) setData(data *Data) {
	debug.Assert(len(data.buffers) >= 2, "list data should have 2 buffers")
	a.array.setData(data)
	vals := data.buffers[1]
	if vals != nil {
		a.offsets = arrow.Int64Traits.CastFromBytes(vals.Bytes())
	}
	a.values = MakeFromData(data.childData[0])
}

// GetOneForMarshal returns the i-th list encoded as a json.RawMessage, or
// nil for a null entry. It panics if marshalling the slice fails.
func (a *LargeList) GetOneForMarshal(i int) interface{} {
	if a.IsNull(i) {
		return nil
	}

	slice := a.newListValue(i)
	defer slice.Release()
	v, err := json.Marshal(slice)
	if err != nil {
		panic(err)
	}
	return json.RawMessage(v)
}

// MarshalJSON implements json.Marshaler, encoding the array as a JSON array
// with null entries for null lists.
func (a *LargeList) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	buf.WriteByte('[')
	for i := 0; i < a.Len(); i++ {
		if i != 0 {
			buf.WriteByte(',')
		}
		if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
			return nil, err
		}
	}
	buf.WriteByte(']')
	return buf.Bytes(), nil
}

// arrayEqualLargeList reports element-wise equality of two LargeList arrays.
// Left-side null slots are skipped; presumably the null bitmaps have already
// been compared by the caller — verify before reuse.
func arrayEqualLargeList(left, right *LargeList) bool {
	for i := 0; i < left.Len(); i++ {
		if left.IsNull(i) {
			continue
		}
		o := func() bool {
			l := left.newListValue(i)
			defer l.Release()
			r := right.newListValue(i)
			defer r.Release()
			return Equal(l, r)
		}()
		if !o {
			return false
		}
	}
	return true
}

// Len returns the number of elements in the array.
func (a *LargeList) Len() int { return a.array.Len() }

// Offsets returns the raw int64 offsets buffer (not adjusted for the
// array's own offset).
func (a *LargeList) Offsets() []int64 { return a.offsets }

// ValueOffsets returns the [start, end) range in the child values array
// occupied by list i.
func (a *LargeList) ValueOffsets(i int) (start, end int64) {
	debug.Assert(i >= 0 && i < a.array.data.length, "index out of range")
	j := i + a.array.data.offset
	start, end = a.offsets[j], a.offsets[j+1]
	return
}

// Retain increases the reference count of the array and its child by 1.
func (a *LargeList) Retain() {
	a.array.Retain()
	a.values.Retain()
}

// Release decreases the reference count of the array and its child by 1.
func (a *LargeList) Release() {
	a.array.Release()
	a.values.Release()
}
+
// baseListBuilder carries the state shared by ListBuilder and
// LargeListBuilder; the offset width difference is abstracted behind
// appendOffsetVal.
type baseListBuilder struct {
	builder

	values  Builder // value builder for the list's elements.
	offsets Builder // Int32Builder (List) or Int64Builder (LargeList)

	// actual list type
	dt              arrow.DataType
	appendOffsetVal func(int) // appends one offset with the correct width
}

// ListLikeBuilder is the common interface of builders for list-like arrays.
type ListLikeBuilder interface {
	Builder
	ValueBuilder() Builder
	Append(bool)
}

// VarLenListLikeBuilder additionally supports appending an element with an
// explicit size (used by the list-view builders).
type VarLenListLikeBuilder interface {
	ListLikeBuilder
	AppendWithSize(bool, int)
}

// ListBuilder builds List arrays.
type ListBuilder struct {
	baseListBuilder
}

// LargeListBuilder builds LargeList arrays.
type LargeListBuilder struct {
	baseListBuilder
}
+
// NewListBuilder returns a builder, using the provided memory allocator.
// The created list builder will create a list whose elements will be of type etype.
func NewListBuilder(mem memory.Allocator, etype arrow.DataType) *ListBuilder {
	offsetBldr := NewInt32Builder(mem)
	return &ListBuilder{
		baseListBuilder{
			builder: builder{refCount: 1, mem: mem},
			values:  NewBuilder(mem, etype),
			offsets: offsetBldr,
			dt:      arrow.ListOf(etype),
			// closure captures the concrete builder so baseListBuilder can
			// stay agnostic to the offset integer width.
			appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) },
		},
	}
}

// NewListBuilderWithField takes a field to use for the child rather than just
// a datatype to allow for more customization.
func NewListBuilderWithField(mem memory.Allocator, field arrow.Field) *ListBuilder {
	offsetBldr := NewInt32Builder(mem)
	return &ListBuilder{
		baseListBuilder{
			builder:         builder{refCount: 1, mem: mem},
			values:          NewBuilder(mem, field.Type),
			offsets:         offsetBldr,
			dt:              arrow.ListOfField(field),
			appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) },
		},
	}
}

// Type returns the list type being built, with the element field's type
// refreshed from the value builder (which may itself evolve, e.g. for
// dictionary builders). Returns nil for an unrecognized underlying type.
func (b *baseListBuilder) Type() arrow.DataType {
	switch dt := b.dt.(type) {
	case *arrow.ListType:
		f := dt.ElemField()
		f.Type = b.values.Type()
		return arrow.ListOfField(f)
	case *arrow.LargeListType:
		f := dt.ElemField()
		f.Type = b.values.Type()
		return arrow.LargeListOfField(f)
	}
	return nil
}

// NewLargeListBuilder returns a builder, using the provided memory allocator.
// The created list builder will create a list whose elements will be of type etype.
func NewLargeListBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListBuilder {
	offsetBldr := NewInt64Builder(mem)
	return &LargeListBuilder{
		baseListBuilder{
			builder:         builder{refCount: 1, mem: mem},
			values:          NewBuilder(mem, etype),
			offsets:         offsetBldr,
			dt:              arrow.LargeListOf(etype),
			appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) },
		},
	}
}

// NewLargeListBuilderWithField takes a field rather than just an element type
// to allow for more customization of the final type of the LargeList Array
func NewLargeListBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListBuilder {
	offsetBldr := NewInt64Builder(mem)
	return &LargeListBuilder{
		baseListBuilder{
			builder:         builder{refCount: 1, mem: mem},
			values:          NewBuilder(mem, field.Type),
			offsets:         offsetBldr,
			dt:              arrow.LargeListOfField(field),
			appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) },
		},
	}
}
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *baseListBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		b.values.Release()
		b.offsets.Release()
	}

}

// appendNextOffset records the current end of the values builder as the
// next list boundary.
func (b *baseListBuilder) appendNextOffset() {
	b.appendOffsetVal(b.values.Len())
}
+
// Append appends a new list element; v indicates validity. The element's
// values are then appended through ValueBuilder.
func (b *baseListBuilder) Append(v bool) {
	b.Reserve(1)
	b.unsafeAppendBoolToBitmap(v)
	b.appendNextOffset()
}

// AppendWithSize appends a new element; the size argument is ignored for
// offset-based lists (it matters only for list-views).
func (b *baseListBuilder) AppendWithSize(v bool, _ int) {
	b.Append(v)
}

// AppendNull appends a null list element.
func (b *baseListBuilder) AppendNull() {
	b.Reserve(1)
	b.unsafeAppendBoolToBitmap(false)
	b.appendNextOffset()
}

// AppendNulls appends n null list elements.
func (b *baseListBuilder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue appends a valid, zero-length list.
func (b *baseListBuilder) AppendEmptyValue() {
	b.Append(true)
}

// AppendEmptyValues appends n valid, zero-length lists.
func (b *baseListBuilder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}
+
// AppendValues appends raw offsets and a validity slice in bulk. The caller
// supplies offsets consistent with the values builder's contents; no
// validation is performed here.
func (b *ListBuilder) AppendValues(offsets []int32, valid []bool) {
	b.Reserve(len(valid))
	b.offsets.(*Int32Builder).AppendValues(offsets, nil)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
}

// AppendValues appends raw 64-bit offsets and a validity slice in bulk.
func (b *LargeListBuilder) AppendValues(offsets []int64, valid []bool) {
	b.Reserve(len(valid))
	b.offsets.(*Int64Builder).AppendValues(offsets, nil)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
}

// unsafeAppendBoolToBitmap records one validity bit; the caller must have
// reserved space already.
func (b *baseListBuilder) unsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// init allocates capacity; the offsets builder gets one extra slot for the
// trailing end-offset.
func (b *baseListBuilder) init(capacity int) {
	b.builder.init(capacity)
	b.offsets.init(capacity + 1)
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *baseListBuilder) Reserve(n int) {
	b.builder.reserve(n, b.resizeHelper)
	b.offsets.Reserve(n)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
func (b *baseListBuilder) Resize(n int) {
	b.resizeHelper(n)
	b.offsets.Resize(n)
}

// resizeHelper resizes the base builder state, initializing on first use.
func (b *baseListBuilder) resizeHelper(n int) {
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(n, b.builder.init)
	}
}
+
// ValueBuilder returns the builder used for the list's element values.
func (b *baseListBuilder) ValueBuilder() Builder {
	return b.values
}

// NewArray creates a List array from the memory buffers used by the builder and resets the ListBuilder
// so it can be used to build a new array.
func (b *ListBuilder) NewArray() arrow.Array {
	return b.NewListArray()
}

// NewArray creates a LargeList array from the memory buffers used by the builder and resets the LargeListBuilder
// so it can be used to build a new array.
func (b *LargeListBuilder) NewArray() arrow.Array {
	return b.NewLargeListArray()
}

// NewListArray creates a List array from the memory buffers used by the builder and resets the ListBuilder
// so it can be used to build a new array.
func (b *ListBuilder) NewListArray() (a *List) {
	data := b.newData()
	a = NewListData(data)
	data.Release()
	return
}

// NewLargeListArray creates a List array from the memory buffers used by the builder and resets the LargeListBuilder
// so it can be used to build a new array.
func (b *LargeListBuilder) NewLargeListArray() (a *LargeList) {
	data := b.newData()
	a = NewLargeListData(data)
	data.Release()
	return
}

// newData packages the builder's buffers into a Data and resets the builder.
func (b *baseListBuilder) newData() (data *Data) {
	// a valid offsets buffer has length+1 entries; append the final
	// end-offset if it has not been written yet.
	if b.offsets.Len() != b.length+1 {
		b.appendNextOffset()
	}
	values := b.values.NewArray()
	defer values.Release()

	var offsets *memory.Buffer
	if b.offsets != nil {
		arr := b.offsets.NewArray()
		defer arr.Release()
		offsets = arr.Data().Buffers()[1]
	}

	data = NewData(
		b.Type(), b.length,
		[]*memory.Buffer{
			b.nullBitmap,
			offsets,
		},
		[]arrow.ArrayData{values.Data()},
		b.nulls,
		0,
	)
	b.reset()

	return
}
+
// AppendValueFromString appends one list parsed from its JSON string form,
// or a null for NullValueStr.
func (b *baseListBuilder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}

	return b.UnmarshalOne(json.NewDecoder(strings.NewReader(s)))
}

// UnmarshalOne decodes a single JSON value from dec: a JSON array becomes a
// list element, a JSON null becomes a null element; anything else is a type
// error.
func (b *baseListBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch t {
	case json.Delim('['):
		b.Append(true)
		if err := b.values.Unmarshal(dec); err != nil {
			return err
		}
		// consume ']'
		_, err := dec.Token()
		return err
	case nil:
		b.AppendNull()
	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Struct: b.dt.String(),
		}
	}

	return nil
}

// Unmarshal decodes list elements from dec until the stream is exhausted.
func (b *baseListBuilder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *baseListBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("list builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// ListView represents an immutable sequence of array values defined by an
// offset into a child array and a length.
type ListView struct {
	array
	values  arrow.Array // child array holding the flattened list elements
	offsets []int32     // start of element i in values
	sizes   []int32     // length of element i
}

var _ VarLenListLike = (*ListView)(nil)

// NewListViewData returns a new ListView array value, from data.
func NewListViewData(data arrow.ArrayData) *ListView {
	a := &ListView{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}

// ListValues returns the child array holding the flattened list elements.
func (a *ListView) ListValues() arrow.Array { return a.values }

// ValueStr returns the JSON string representation of the value at index i,
// or NullValueStr for null entries.
func (a *ListView) ValueStr(i int) string {
	if !a.IsValid(i) {
		return NullValueStr
	}
	return string(a.GetOneForMarshal(i).(json.RawMessage))
}

// String returns a human-readable representation of the array.
func (a *ListView) String() string {
	o := new(strings.Builder)
	o.WriteString("[")
	for i := 0; i < a.Len(); i++ {
		if i > 0 {
			o.WriteString(" ")
		}
		if a.IsNull(i) {
			o.WriteString(NullValueStr)
			continue
		}
		sub := a.newListValue(i)
		fmt.Fprintf(o, "%v", sub)
		sub.Release()
	}
	o.WriteString("]")
	return o.String()
}

// newListValue returns the i-th list as a slice of the child values array.
// The caller is responsible for releasing the returned array.
func (a *ListView) newListValue(i int) arrow.Array {
	beg, end := a.ValueOffsets(i)
	return NewSlice(a.values, beg, end)
}

// setData initializes the array from data: buffer 1 holds the int32
// offsets, buffer 2 holds the int32 sizes, and child 0 holds the values.
func (a *ListView) setData(data *Data) {
	debug.Assert(len(data.buffers) >= 3, "list-view data should have 3 buffers")
	a.array.setData(data)
	offsets := data.buffers[1]
	if offsets != nil {
		a.offsets = arrow.Int32Traits.CastFromBytes(offsets.Bytes())
	}
	sizes := data.buffers[2]
	if sizes != nil {
		a.sizes = arrow.Int32Traits.CastFromBytes(sizes.Bytes())
	}
	a.values = MakeFromData(data.childData[0])
}

// GetOneForMarshal returns the i-th list encoded as a json.RawMessage, or
// nil for a null entry. It panics if marshalling the slice fails.
func (a *ListView) GetOneForMarshal(i int) interface{} {
	if a.IsNull(i) {
		return nil
	}

	slice := a.newListValue(i)
	defer slice.Release()
	v, err := json.Marshal(slice)
	if err != nil {
		panic(err)
	}
	return json.RawMessage(v)
}

// MarshalJSON implements json.Marshaler, encoding the array as a JSON array
// with null entries for null lists.
func (a *ListView) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	buf.WriteByte('[')
	for i := 0; i < a.Len(); i++ {
		if i != 0 {
			buf.WriteByte(',')
		}
		if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
			return nil, err
		}
	}
	buf.WriteByte(']')
	return buf.Bytes(), nil
}

// arrayEqualListView reports element-wise equality of two ListView arrays.
// Left-side null slots are skipped; presumably the null bitmaps have
// already been compared by the caller — verify before reuse.
func arrayEqualListView(left, right *ListView) bool {
	for i := 0; i < left.Len(); i++ {
		if left.IsNull(i) {
			continue
		}
		o := func() bool {
			l := left.newListValue(i)
			defer l.Release()
			r := right.newListValue(i)
			defer r.Release()
			return Equal(l, r)
		}()
		if !o {
			return false
		}
	}
	return true
}

// Len returns the number of elements in the array.
func (a *ListView) Len() int { return a.array.Len() }

// Offsets returns the raw int32 offsets buffer.
func (a *ListView) Offsets() []int32 { return a.offsets }

// Sizes returns the raw int32 sizes buffer.
func (a *ListView) Sizes() []int32 { return a.sizes }

// Retain increases the reference count of the array and its child by 1.
func (a *ListView) Retain() {
	a.array.Retain()
	a.values.Retain()
}

// Release decreases the reference count of the array and its child by 1.
func (a *ListView) Release() {
	a.array.Release()
	a.values.Release()
}

// ValueOffsets returns the [start, end) range in the child values array
// occupied by list i, computed from the offset and size buffers.
func (a *ListView) ValueOffsets(i int) (start, end int64) {
	debug.Assert(i >= 0 && i < a.array.data.length, "index out of range")
	j := i + a.array.data.offset
	size := int64(a.sizes[j])
	// If size is 0, skip accessing offsets.
	if size == 0 {
		start, end = 0, 0
		return
	}
	start = int64(a.offsets[j])
	end = start + size
	return
}
+
// LargeListView represents an immutable sequence of array values defined by an
// offset into a child array and a length. It mirrors ListView with 64-bit
// offsets and sizes.
type LargeListView struct {
	array
	values  arrow.Array // child array holding the flattened list elements
	offsets []int64     // start of element i in values
	sizes   []int64     // length of element i
}

var _ VarLenListLike = (*LargeListView)(nil)

// NewLargeListViewData returns a new LargeListView array value, from data.
func NewLargeListViewData(data arrow.ArrayData) *LargeListView {
	a := new(LargeListView)
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}

// ListValues returns the child array holding the flattened list elements.
func (a *LargeListView) ListValues() arrow.Array { return a.values }

// ValueStr returns the JSON string representation of the value at index i,
// or NullValueStr for null entries.
func (a *LargeListView) ValueStr(i int) string {
	if !a.IsValid(i) {
		return NullValueStr
	}
	return string(a.GetOneForMarshal(i).(json.RawMessage))
}

// String returns a human-readable representation of the array.
func (a *LargeListView) String() string {
	o := new(strings.Builder)
	o.WriteString("[")
	for i := 0; i < a.Len(); i++ {
		if i > 0 {
			o.WriteString(" ")
		}
		if a.IsNull(i) {
			o.WriteString(NullValueStr)
			continue
		}
		sub := a.newListValue(i)
		fmt.Fprintf(o, "%v", sub)
		sub.Release()
	}
	o.WriteString("]")
	return o.String()
}

// newListValue returns the i-th list as a slice of the child values array.
// The caller is responsible for releasing the returned array.
func (a *LargeListView) newListValue(i int) arrow.Array {
	beg, end := a.ValueOffsets(i)
	return NewSlice(a.values, beg, end)
}

// setData initializes the array from data: buffer 1 holds the int64
// offsets, buffer 2 holds the int64 sizes, and child 0 holds the values.
func (a *LargeListView) setData(data *Data) {
	debug.Assert(len(data.buffers) >= 3, "list-view data should have 3 buffers")
	a.array.setData(data)
	offsets := data.buffers[1]
	if offsets != nil {
		a.offsets = arrow.Int64Traits.CastFromBytes(offsets.Bytes())
	}
	sizes := data.buffers[2]
	if sizes != nil {
		a.sizes = arrow.Int64Traits.CastFromBytes(sizes.Bytes())
	}
	a.values = MakeFromData(data.childData[0])
}

// GetOneForMarshal returns the i-th list encoded as a json.RawMessage, or
// nil for a null entry. It panics if marshalling the slice fails.
func (a *LargeListView) GetOneForMarshal(i int) interface{} {
	if a.IsNull(i) {
		return nil
	}

	slice := a.newListValue(i)
	defer slice.Release()
	v, err := json.Marshal(slice)
	if err != nil {
		panic(err)
	}
	return json.RawMessage(v)
}

// MarshalJSON implements json.Marshaler, encoding the array as a JSON array
// with null entries for null lists.
func (a *LargeListView) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	buf.WriteByte('[')
	for i := 0; i < a.Len(); i++ {
		if i != 0 {
			buf.WriteByte(',')
		}
		if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
			return nil, err
		}
	}
	buf.WriteByte(']')
	return buf.Bytes(), nil
}

// arrayEqualLargeListView reports element-wise equality of two
// LargeListView arrays. Left-side null slots are skipped; presumably the
// null bitmaps have already been compared by the caller — verify before reuse.
func arrayEqualLargeListView(left, right *LargeListView) bool {
	for i := 0; i < left.Len(); i++ {
		if left.IsNull(i) {
			continue
		}
		o := func() bool {
			l := left.newListValue(i)
			defer l.Release()
			r := right.newListValue(i)
			defer r.Release()
			return Equal(l, r)
		}()
		if !o {
			return false
		}
	}
	return true
}

// Len returns the number of elements in the array.
func (a *LargeListView) Len() int { return a.array.Len() }

// Offsets returns the raw int64 offsets buffer.
func (a *LargeListView) Offsets() []int64 { return a.offsets }

// Sizes returns the raw int64 sizes buffer.
func (a *LargeListView) Sizes() []int64 { return a.sizes }

// ValueOffsets returns the [start, end) range in the child values array
// occupied by list i, computed from the offset and size buffers.
func (a *LargeListView) ValueOffsets(i int) (start, end int64) {
	debug.Assert(i >= 0 && i < a.array.data.length, "index out of range")
	j := i + a.array.data.offset
	size := a.sizes[j]
	// If size is 0, skip accessing offsets.
	if size == 0 {
		return 0, 0
	}
	start = a.offsets[j]
	end = start + size
	return
}

// Retain increases the reference count of the array and its child by 1.
func (a *LargeListView) Retain() {
	a.array.Retain()
	a.values.Retain()
}

// Release decreases the reference count of the array and its child by 1.
func (a *LargeListView) Release() {
	a.array.Release()
	a.values.Release()
}
+
// Accessors for offsets and sizes to make ListView and LargeListView validation generic.
type offsetsAndSizes interface {
	offsetAt(slot int64) int64
	sizeAt(slot int64) int64
}

var _ offsetsAndSizes = (*ListView)(nil)
var _ offsetsAndSizes = (*LargeListView)(nil)

// offsetAt returns the offset of slot, adjusted by the array's own offset.
func (a *ListView) offsetAt(slot int64) int64 { return int64(a.offsets[int64(a.data.offset)+slot]) }

// sizeAt returns the size of slot, adjusted by the array's own offset.
func (a *ListView) sizeAt(slot int64) int64 { return int64(a.sizes[int64(a.data.offset)+slot]) }

func (a *LargeListView) offsetAt(slot int64) int64 { return a.offsets[int64(a.data.offset)+slot] }

func (a *LargeListView) sizeAt(slot int64) int64 { return a.sizes[int64(a.data.offset)+slot] }

// outOfBoundsListViewOffset builds the validation error for an offset that
// is negative or not less than offsetLimit.
func outOfBoundsListViewOffset(l offsetsAndSizes, slot int64, offsetLimit int64) error {
	offset := l.offsetAt(slot)
	return fmt.Errorf("%w: Offset invariant failure: offset for slot %d out of bounds. Expected %d to be at least 0 and less than %d", arrow.ErrInvalid, slot, offset, offsetLimit)
}

// outOfBoundsListViewSize builds the validation error for a size that is
// either negative or extends past offsetLimit.
func outOfBoundsListViewSize(l offsetsAndSizes, slot int64, offsetLimit int64) error {
	size := l.sizeAt(slot)
	if size < 0 {
		return fmt.Errorf("%w: Offset invariant failure: size for slot %d out of bounds: %d < 0", arrow.ErrInvalid, slot, size)
	}
	offset := l.offsetAt(slot)
	return fmt.Errorf("%w: Offset invariant failure: size for slot %d out of bounds: %d + %d > %d", arrow.ErrInvalid, slot, offset, size, offsetLimit)
}
+
+// Pre-condition: Basic validation has already been performed
+func (a *array) fullyValidateOffsetsAndSizes(l offsetsAndSizes, offsetLimit int64) error {
+ for slot := int64(0); slot < int64(a.Len()); slot += 1 {
+ size := l.sizeAt(slot)
+ if size > 0 {
+ offset := l.offsetAt(slot)
+ if offset < 0 || offset > offsetLimit {
+ return outOfBoundsListViewOffset(l, slot, offsetLimit)
+ }
+ if size > offsetLimit-int64(offset) {
+ return outOfBoundsListViewSize(l, slot, offsetLimit)
+ }
+ } else if size < 0 {
+ return outOfBoundsListViewSize(l, slot, offsetLimit)
+ }
+ }
+
+ return nil
+}
+
// validateOffsetsAndMaybeSizes performs structural validation of the offsets
// buffer (and, for list-views, the sizes buffer): presence, byte-size
// sufficiency, and — when fullValidation is requested — per-slot bounds
// checks. offsetByteWidth is 4 or 8; offsetLimit is the child values length.
func (a *array) validateOffsetsAndMaybeSizes(l offsetsAndSizes, offsetByteWidth int, isListView bool, offsetLimit int64, fullValidation bool) error {
	nonEmpty := a.Len() > 0
	if a.data.buffers[1] == nil {
		// For length 0, an empty offsets buffer is accepted (ARROW-544).
		if nonEmpty {
			return fmt.Errorf("non-empty array but offsets are null")
		}
		return nil
	}
	if isListView && a.data.buffers[2] == nil {
		if nonEmpty {
			return fmt.Errorf("non-empty array but sizes are null")
		}
		return nil
	}

	// Offset-based lists need length+offset+1 entries (trailing end
	// offset); list-views need only length+offset.
	var requiredOffsets int
	if nonEmpty {
		requiredOffsets = a.Len() + a.Offset()
		if !isListView {
			requiredOffsets += 1
		}
	} else {
		requiredOffsets = 0
	}
	offsetsByteSize := a.data.buffers[1].Len()
	if offsetsByteSize/offsetByteWidth < requiredOffsets {
		return fmt.Errorf("offsets buffer size (bytes): %d isn't large enough for length: %d and offset: %d",
			offsetsByteSize, a.Len(), a.Offset())
	}
	if isListView {
		requiredSizes := a.Len() + a.Offset()
		sizesBytesSize := a.data.buffers[2].Len()
		if sizesBytesSize/offsetByteWidth < requiredSizes {
			return fmt.Errorf("sizes buffer size (bytes): %d isn't large enough for length: %d and offset: %d",
				sizesBytesSize, a.Len(), a.Offset())
		}
	}

	if fullValidation && requiredOffsets > 0 {
		if isListView {
			return a.fullyValidateOffsetsAndSizes(l, offsetLimit)
		}
		// TODO: implement validation of List and LargeList
		// return fullyValidateOffsets(offset_limit)
		return nil
	}
	return nil
}
+
// validate runs structural (and optionally full) validation against the
// child values length, using 4-byte offsets.
func (a *ListView) validate(fullValidation bool) error {
	values := a.array.data.childData[0]
	offsetLimit := values.Len()
	return a.array.validateOffsetsAndMaybeSizes(a, 4, true, int64(offsetLimit), fullValidation)
}

// Validate performs cheap structural validation only.
func (a *ListView) Validate() error {
	return a.validate(false)
}

// ValidateFull additionally checks every offset/size pair.
func (a *ListView) ValidateFull() error {
	return a.validate(true)
}

// validate runs structural (and optionally full) validation against the
// child values length, using 8-byte offsets.
func (a *LargeListView) validate(fullValidation bool) error {
	values := a.array.data.childData[0]
	offsetLimit := values.Len()
	return a.array.validateOffsetsAndMaybeSizes(a, 8, true, int64(offsetLimit), fullValidation)
}

// Validate performs cheap structural validation only.
func (a *LargeListView) Validate() error {
	return a.validate(false)
}

// ValidateFull additionally checks every offset/size pair.
func (a *LargeListView) ValidateFull() error {
	return a.validate(true)
}
+
// baseListViewBuilder carries the state shared by ListViewBuilder and
// LargeListViewBuilder; the integer width of offsets/sizes is abstracted
// behind the append closures.
type baseListViewBuilder struct {
	builder

	values  Builder // value builder for the list-view's elements.
	offsets Builder // Int32Builder or Int64Builder for element starts
	sizes   Builder // Int32Builder or Int64Builder for element lengths

	// actual list-view type
	dt              arrow.DataType
	appendOffsetVal func(int) // appends one offset with the correct width
	appendSizeVal   func(int) // appends one size with the correct width
}

// ListViewBuilder builds ListView arrays.
type ListViewBuilder struct {
	baseListViewBuilder
}

// LargeListViewBuilder builds LargeListView arrays.
type LargeListViewBuilder struct {
	baseListViewBuilder
}
+
// NewListViewBuilder returns a builder, using the provided memory allocator.
// The created list-view builder will create a list whose elements will be
// of type etype.
func NewListViewBuilder(mem memory.Allocator, etype arrow.DataType) *ListViewBuilder {
	offsetBldr := NewInt32Builder(mem)
	sizeBldr := NewInt32Builder(mem)
	return &ListViewBuilder{
		baseListViewBuilder{
			builder:         builder{refCount: 1, mem: mem},
			values:          NewBuilder(mem, etype),
			offsets:         offsetBldr,
			sizes:           sizeBldr,
			dt:              arrow.ListViewOf(etype),
			appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) },
			appendSizeVal:   func(s int) { sizeBldr.Append(int32(s)) },
		},
	}
}

// NewListViewBuilderWithField takes a field to use for the child rather than just
// a datatype to allow for more customization.
func NewListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *ListViewBuilder {
	offsetBldr := NewInt32Builder(mem)
	sizeBldr := NewInt32Builder(mem)
	return &ListViewBuilder{
		baseListViewBuilder{
			builder:         builder{refCount: 1, mem: mem},
			values:          NewBuilder(mem, field.Type),
			offsets:         offsetBldr,
			sizes:           sizeBldr,
			dt:              arrow.ListViewOfField(field),
			appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) },
			appendSizeVal:   func(s int) { sizeBldr.Append(int32(s)) },
		},
	}
}

// Type returns the list-view type being built, with the element field's
// type refreshed from the value builder. Returns nil for an unrecognized
// underlying type.
func (b *baseListViewBuilder) Type() arrow.DataType {
	switch dt := b.dt.(type) {
	case *arrow.ListViewType:
		f := dt.ElemField()
		f.Type = b.values.Type()
		return arrow.ListViewOfField(f)
	case *arrow.LargeListViewType:
		f := dt.ElemField()
		f.Type = b.values.Type()
		return arrow.LargeListViewOfField(f)
	}
	return nil
}
+
// NewLargeListViewBuilder returns a builder, using the provided memory allocator.
// The created list-view builder will create a list whose elements will be of type etype.
func NewLargeListViewBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListViewBuilder {
	offsetBldr := NewInt64Builder(mem)
	sizeBldr := NewInt64Builder(mem)
	return &LargeListViewBuilder{
		baseListViewBuilder{
			builder:         builder{refCount: 1, mem: mem},
			values:          NewBuilder(mem, etype),
			offsets:         offsetBldr,
			sizes:           sizeBldr,
			dt:              arrow.LargeListViewOf(etype),
			appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) },
			appendSizeVal:   func(s int) { sizeBldr.Append(int64(s)) },
		},
	}
}
+
+// NewLargeListViewBuilderWithField takes a field rather than just an element type
+// to allow for more customization of the final type of the LargeListView Array
+func NewLargeListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListViewBuilder {
+ offsetBldr := NewInt64Builder(mem)
+ sizeBldr := NewInt64Builder(mem)
+ return &LargeListViewBuilder{
+ baseListViewBuilder{
+ builder: builder{refCount: 1, mem: mem},
+ values: NewBuilder(mem, field.Type),
+ offsets: offsetBldr,
+ sizes: sizeBldr,
+ dt: arrow.LargeListViewOfField(field),
+ appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) },
+ appendSizeVal: func(o int) { sizeBldr.Append(int64(o)) },
+ },
+ }
+}
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *baseListViewBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		b.values.Release()
		b.offsets.Release()
		b.sizes.Release()
	}
}
+
// AppendDimensions appends a valid element with an explicit offset and size,
// independent of the current length of the values builder.
func (b *baseListViewBuilder) AppendDimensions(offset int, listSize int) {
	b.Reserve(1)
	b.unsafeAppendBoolToBitmap(true)
	b.appendOffsetVal(offset)
	b.appendSizeVal(listSize)
}

// Append must not be used on list-view builders; callers must supply the
// element size via AppendWithSize.
func (b *baseListViewBuilder) Append(v bool) {
	debug.Assert(false, "baseListViewBuilder.Append should never be called -- use AppendWithSize instead")
}

// AppendWithSize appends an element whose offset is the current end of the
// values builder and whose length is listSize. Null elements must use size 0.
func (b *baseListViewBuilder) AppendWithSize(v bool, listSize int) {
	debug.Assert(v || listSize == 0, "invalid list-view should have size 0")
	b.Reserve(1)
	b.unsafeAppendBoolToBitmap(v)
	b.appendOffsetVal(b.values.Len())
	b.appendSizeVal(listSize)
}

// AppendNull appends a null element (size 0).
func (b *baseListViewBuilder) AppendNull() {
	b.AppendWithSize(false, 0)
}

// AppendNulls appends n null elements.
func (b *baseListViewBuilder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue appends a valid, zero-length element.
func (b *baseListViewBuilder) AppendEmptyValue() {
	b.AppendWithSize(true, 0)
}

// AppendEmptyValues appends n valid, zero-length elements.
func (b *baseListViewBuilder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}
+
+func (b *ListViewBuilder) AppendValuesWithSizes(offsets []int32, sizes []int32, valid []bool) {
+ b.Reserve(len(valid))
+ b.offsets.(*Int32Builder).AppendValues(offsets, nil)
+ b.sizes.(*Int32Builder).AppendValues(sizes, nil)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *LargeListViewBuilder) AppendValuesWithSizes(offsets []int64, sizes []int64, valid []bool) {
+ b.Reserve(len(valid))
+ b.offsets.(*Int64Builder).AppendValues(offsets, nil)
+ b.sizes.(*Int64Builder).AppendValues(sizes, nil)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *baseListViewBuilder) unsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+func (b *baseListViewBuilder) init(capacity int) {
+ b.builder.init(capacity)
+ b.offsets.init(capacity)
+ b.sizes.init(capacity)
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *baseListViewBuilder) Reserve(n int) {
+ b.builder.reserve(n, b.resizeHelper)
+ b.offsets.Reserve(n)
+ b.sizes.Reserve(n)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *baseListViewBuilder) Resize(n int) {
+	b.resizeHelper(n)
+	b.offsets.Resize(n)
+	b.sizes.Resize(n)
+}
+
+// resizeHelper resizes only the validity-bitmap portion of the builder
+// (offsets/sizes are handled by the callers), clamping n to the minimum
+// builder capacity and lazily initializing on first use.
+func (b *baseListViewBuilder) resizeHelper(n int) {
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(n, b.builder.init)
+	}
+}
+
+// ValueBuilder returns the builder for the child values array that the
+// list-views index into.
+func (b *baseListViewBuilder) ValueBuilder() Builder {
+	return b.values
+}
+
+// NewArray creates a ListView array from the memory buffers used by the builder and
+// resets the ListViewBuilder so it can be used to build a new array.
+// It delegates to NewListViewArray and satisfies the Builder interface.
+func (b *ListViewBuilder) NewArray() arrow.Array {
+	return b.NewListViewArray()
+}
+
+// NewArray creates a LargeListView array from the memory buffers used by the builder
+// and resets the LargeListViewBuilder so it can be used to build a new array.
+// It delegates to NewLargeListViewArray and satisfies the Builder interface.
+func (b *LargeListViewBuilder) NewArray() arrow.Array {
+	return b.NewLargeListViewArray()
+}
+
+// NewListViewArray creates a ListView array from the memory buffers used by the builder
+// and resets the ListViewBuilder so it can be used to build a new array.
+func (b *ListViewBuilder) NewListViewArray() (a *ListView) {
+	data := b.newData()
+	a = NewListViewData(data)
+	// The array holds its own reference; drop the builder's.
+	data.Release()
+	return
+}
+
+// NewLargeListViewArray creates a LargeListView array from the memory buffers used by
+// the builder and resets the LargeListViewBuilder so it can be used to build a new array.
+func (b *LargeListViewBuilder) NewLargeListViewArray() (a *LargeListView) {
+	data := b.newData()
+	a = NewLargeListViewData(data)
+	// The array holds its own reference; drop the builder's.
+	data.Release()
+	return
+}
+
+// newData finalizes the builder's buffers into a Data with the list-view
+// layout [validity, offsets, sizes] plus one child (the values array), then
+// resets the builder for reuse.
+func (b *baseListViewBuilder) newData() (data *Data) {
+	values := b.values.NewArray()
+	defer values.Release()
+
+	var offsets *memory.Buffer
+	if b.offsets != nil {
+		arr := b.offsets.NewArray()
+		defer arr.Release()
+		// Buffer 1 of a primitive array is its data buffer.
+		offsets = arr.Data().Buffers()[1]
+	}
+
+	var sizes *memory.Buffer
+	if b.sizes != nil {
+		arr := b.sizes.NewArray()
+		defer arr.Release()
+		sizes = arr.Data().Buffers()[1]
+	}
+
+	data = NewData(
+		b.Type(), b.length,
+		[]*memory.Buffer{
+			b.nullBitmap,
+			offsets,
+			sizes,
+		},
+		[]arrow.ArrayData{values.Data()},
+		b.nulls,
+		0,
+	)
+	b.reset()
+
+	return
+}
+
+// AppendValueFromString appends a value parsed from its string
+// representation: the null sentinel appends a null, anything else is
+// decoded as a JSON list.
+func (b *baseListViewBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+
+	return b.UnmarshalOne(json.NewDecoder(strings.NewReader(s)))
+}
+
+// UnmarshalOne decodes a single JSON value from dec: a JSON array becomes
+// one list-view whose elements are appended to the child builder, JSON null
+// becomes a null entry, and anything else is a type error.
+func (b *baseListViewBuilder) UnmarshalOne(dec *json.Decoder) error {
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	switch t {
+	case json.Delim('['):
+		offset := b.values.Len()
+		// 0 is a placeholder size as we don't know the actual size yet
+		b.AppendWithSize(true, 0)
+		if err := b.values.Unmarshal(dec); err != nil {
+			return err
+		}
+		// consume ']'
+		_, err := dec.Token()
+		// replace the last size with the actual size
+		// (patch the raw buffer directly; the width depends on whether this
+		// is a ListView (int32) or LargeListView (int64) builder)
+		switch b.sizes.(type) {
+		case *Int32Builder:
+			b.sizes.(*Int32Builder).rawData[b.sizes.Len()-1] = int32(b.values.Len() - offset)
+		case *Int64Builder:
+			b.sizes.(*Int64Builder).rawData[b.sizes.Len()-1] = int64(b.values.Len() - offset)
+		}
+		return err
+	case nil:
+		b.AppendNull()
+	default:
+		return &json.UnmarshalTypeError{
+			Value:  fmt.Sprint(t),
+			Struct: b.dt.String(),
+		}
+	}
+
+	return nil
+}
+
+// Unmarshal decodes and appends JSON values from dec until the current
+// JSON array is exhausted.
+func (b *baseListViewBuilder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler: data must be a JSON array of
+// lists, each element of which is appended to the builder.
+func (b *baseListViewBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("list-view builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// minListViewOffset32 finds the minimum child-array offset referenced by any
+// non-null, non-empty list-view in input. Returns 0 when every view is null
+// or empty.
+//
+// Pre-conditions:
+//
+//	input.DataType() is ListViewType
+//	input.Len() > 0 && input.NullN() != input.Len()
+func minListViewOffset32(input arrow.ArrayData) int32 {
+	var bitmap []byte
+	if input.Buffers()[0] != nil {
+		bitmap = input.Buffers()[0].Bytes()
+	}
+	offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():]
+	sizes := arrow.Int32Traits.CastFromBytes(input.Buffers()[2].Bytes())[input.Offset():]
+
+	isNull := func(i int) bool {
+		return bitmap != nil && bitutil.BitIsNotSet(bitmap, input.Offset()+i)
+	}
+
+	// It's very likely that the first non-null non-empty list-view starts at
+	// offset 0 of the child array.
+	i := 0
+	for i < input.Len() && (isNull(i) || sizes[i] == 0) {
+		i += 1
+	}
+	if i >= input.Len() {
+		return 0
+	}
+	minOffset := offsets[i]
+	if minOffset == 0 {
+		// early exit: offset 0 found already
+		return 0
+	}
+
+	// Slow path: scan the buffers entirely.
+	i += 1
+	for ; i < input.Len(); i += 1 {
+		if isNull(i) {
+			continue
+		}
+		offset := offsets[i]
+		// Empty views can carry arbitrary offsets, so only non-empty ones count.
+		if offset < minOffset && sizes[i] > 0 {
+			minOffset = offset
+		}
+	}
+	return minOffset
+}
+
+// Find the maximum offset+size in a LIST_VIEW array.
+//
+// Returns 0 when every view is null or empty.
+//
+// Pre-conditions:
+//
+//	input.DataType() is ListViewType
+//	input.Len() > 0 && input.NullN() != input.Len()
+func maxListViewOffset32(input arrow.ArrayData) int {
+	inputOffset := input.Offset()
+	var bitmap []byte
+	if input.Buffers()[0] != nil {
+		bitmap = input.Buffers()[0].Bytes()
+	}
+	offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[inputOffset:]
+	sizes := arrow.Int32Traits.CastFromBytes(input.Buffers()[2].Bytes())[inputOffset:]
+
+	isNull := func(i int) bool {
+		return bitmap != nil && bitutil.BitIsNotSet(bitmap, inputOffset+i)
+	}
+
+	// Scan backwards for the last non-null, non-empty view; its end is a
+	// plausible candidate for the maximum.
+	i := input.Len() - 1 // safe because input.Len() > 0
+	for i != 0 && (isNull(i) || sizes[i] == 0) {
+		i -= 1
+	}
+	offset := offsets[i]
+	size := sizes[i]
+	if i == 0 {
+		if isNull(i) || sizes[i] == 0 {
+			return 0
+		} else {
+			return int(offset + size)
+		}
+	}
+
+	values := input.Children()[0]
+	maxEnd := int(offsets[i] + sizes[i])
+	if maxEnd == values.Len() {
+		// Early-exit: maximum possible view-end found already.
+		return maxEnd
+	}
+
+	// Slow path: scan the buffers entirely.
+	for ; i >= 0; i -= 1 {
+		offset := offsets[i]
+		size := sizes[i]
+		if size > 0 && !isNull(i) {
+			if int(offset+size) > maxEnd {
+				maxEnd = int(offset + size)
+				if maxEnd == values.Len() {
+					return maxEnd
+				}
+			}
+		}
+	}
+	return maxEnd
+}
+
+// minLargeListViewOffset64 finds the minimum child-array offset referenced
+// by any non-null, non-empty list-view in input. Returns 0 when every view
+// is null or empty. 64-bit analogue of minListViewOffset32.
+//
+// Pre-conditions:
+//
+//	input.DataType() is LargeListViewType
+//	input.Len() > 0 && input.NullN() != input.Len()
+func minLargeListViewOffset64(input arrow.ArrayData) int64 {
+	var bitmap []byte
+	if input.Buffers()[0] != nil {
+		bitmap = input.Buffers()[0].Bytes()
+	}
+	offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():]
+	sizes := arrow.Int64Traits.CastFromBytes(input.Buffers()[2].Bytes())[input.Offset():]
+
+	isNull := func(i int) bool {
+		return bitmap != nil && bitutil.BitIsNotSet(bitmap, input.Offset()+i)
+	}
+
+	// It's very likely that the first non-null non-empty list-view starts at
+	// offset 0 of the child array.
+	i := 0
+	for i < input.Len() && (isNull(i) || sizes[i] == 0) {
+		i += 1
+	}
+	if i >= input.Len() {
+		return 0
+	}
+	minOffset := offsets[i]
+	if minOffset == 0 {
+		// early exit: offset 0 found already
+		return 0
+	}
+
+	// Slow path: scan the buffers entirely.
+	i += 1
+	for ; i < input.Len(); i += 1 {
+		if isNull(i) {
+			continue
+		}
+		offset := offsets[i]
+		// Empty views can carry arbitrary offsets, so only non-empty ones count.
+		if offset < minOffset && sizes[i] > 0 {
+			minOffset = offset
+		}
+	}
+	return minOffset
+}
+
+// Find the maximum offset+size in a LARGE_LIST_VIEW array.
+//
+// Returns 0 when every view is null or empty. Saturates to math.MaxInt64
+// if offset+size would overflow (only possible on invalid data).
+//
+// Pre-conditions:
+//
+//	input.DataType() is LargeListViewType
+//	input.Len() > 0 && input.NullN() != input.Len()
+func maxLargeListViewOffset64(input arrow.ArrayData) int64 {
+	inputOffset := input.Offset()
+	var bitmap []byte
+	if input.Buffers()[0] != nil {
+		bitmap = input.Buffers()[0].Bytes()
+	}
+	offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[inputOffset:]
+	sizes := arrow.Int64Traits.CastFromBytes(input.Buffers()[2].Bytes())[inputOffset:]
+
+	isNull := func(i int) bool {
+		return bitmap != nil && bitutil.BitIsNotSet(bitmap, inputOffset+i)
+	}
+
+	// Scan backwards for the last non-null, non-empty view; its end is a
+	// plausible candidate for the maximum and may allow an early exit below.
+	i := input.Len() - 1 // safe because input.Len() > 0
+	for i != 0 && (isNull(i) || sizes[i] == 0) {
+		i -= 1
+	}
+	offset := offsets[i]
+	size := sizes[i]
+	if i == 0 {
+		if isNull(i) || sizes[i] == 0 {
+			return 0
+		} else {
+			return offset + size
+		}
+	}
+
+	if offset > math.MaxInt64-size {
+		// Early-exit: 64-bit overflow detected. This is not possible on a
+		// valid list-view, but we return the maximum possible value to
+		// avoid undefined behavior.
+		return math.MaxInt64
+	}
+	values := input.Children()[0]
+	maxEnd := offsets[i] + sizes[i]
+	if maxEnd == int64(values.Len()) {
+		// Early-exit: maximum possible view-end found already.
+		return maxEnd
+	}
+
+	// Slow path: scan the buffers entirely.
+	for ; i >= 0; i -= 1 {
+		offset := offsets[i]
+		size := sizes[i]
+		if size > 0 && !isNull(i) {
+			if offset+size > maxEnd {
+				if offset > math.MaxInt64-size {
+					// 64-bit overflow detected. This is not possible on a valid list-view,
+					// but we saturate maxEnd to the maximum possible value to avoid
+					// undefined behavior.
+					return math.MaxInt64
+				}
+				maxEnd = offset + size
+				if maxEnd == int64(values.Len()) {
+					return maxEnd
+				}
+			}
+		}
+	}
+	return maxEnd
+}
+
+// rangeOfValuesUsed returns (offset, length) of the smallest contiguous span
+// of the child values array referenced by the list-like array in input.
+// Returns (0, 0) for empty or all-null arrays, or for unrecognized types.
+func rangeOfValuesUsed(input arrow.ArrayData) (int, int) {
+	if input.Len() == 0 || input.NullN() == input.Len() {
+		return 0, 0
+	}
+	var minOffset, maxEnd int
+	switch input.DataType().(type) {
+	case *arrow.ListViewType:
+		minOffset = int(minListViewOffset32(input))
+		maxEnd = maxListViewOffset32(input)
+	case *arrow.LargeListViewType:
+		minOffset = int(minLargeListViewOffset64(input))
+		maxEnd = int(maxLargeListViewOffset64(input))
+	case *arrow.ListType:
+		// Plain lists have monotonic offsets: first/last offsets bound the range.
+		offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():]
+		minOffset = int(offsets[0])
+		maxEnd = int(offsets[len(offsets)-1])
+	case *arrow.LargeListType:
+		offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():]
+		minOffset = int(offsets[0])
+		maxEnd = int(offsets[len(offsets)-1])
+	case *arrow.MapType:
+		offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():]
+		minOffset = int(offsets[0])
+		maxEnd = int(offsets[len(offsets)-1])
+	}
+	return minOffset, maxEnd - minOffset
+}
+
+// RangeOfValuesUsed returns the smallest contiguous range (offset, length)
+// of values of the child array that are referenced by all the list values
+// in the input array.
+func RangeOfValuesUsed(input VarLenListLike) (int, int) {
+	return rangeOfValuesUsed(input.Data())
+}
+
+// Compile-time interface-satisfaction checks for the list-like arrays and
+// builders defined in this file.
+var (
+	_ arrow.Array = (*List)(nil)
+	_ arrow.Array = (*LargeList)(nil)
+	_ arrow.Array = (*ListView)(nil)
+	_ arrow.Array = (*LargeListView)(nil)
+
+	_ Builder = (*ListBuilder)(nil)
+	_ Builder = (*LargeListBuilder)(nil)
+	_ Builder = (*ListViewBuilder)(nil)
+	_ Builder = (*LargeListViewBuilder)(nil)
+
+	_ VarLenListLike = (*List)(nil)
+	_ VarLenListLike = (*LargeList)(nil)
+	_ VarLenListLike = (*Map)(nil)
+	_ VarLenListLike = (*ListView)(nil)
+	_ VarLenListLike = (*LargeListView)(nil)
+	_ ListLike = (*FixedSizeList)(nil)
+
+	// The original list asserted ListBuilder and LargeListBuilder twice and
+	// never asserted the view builders; assert each builder exactly once.
+	_ VarLenListLikeBuilder = (*ListBuilder)(nil)
+	_ VarLenListLikeBuilder = (*LargeListBuilder)(nil)
+	_ VarLenListLikeBuilder = (*ListViewBuilder)(nil)
+	_ VarLenListLikeBuilder = (*LargeListViewBuilder)(nil)
+	_ VarLenListLikeBuilder = (*MapBuilder)(nil)
+	_ ListLikeBuilder = (*FixedSizeListBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go
new file mode 100644
index 000000000..9945a90ce
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go
@@ -0,0 +1,361 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Map represents an immutable sequence of Key/Value structs. It is a
+// logical type that is implemented as a List<Struct: key, value>.
+type Map struct {
+	*List
+	// keys and items are views over the two fields of the child struct array.
+	keys, items arrow.Array
+}
+
+// Compile-time check that Map satisfies ListLike.
+var _ ListLike = (*Map)(nil)
+
+// NewMapData returns a new Map array value, from data.
+// Panics (via setData) if data does not have the expected map layout.
+func NewMapData(data arrow.ArrayData) *Map {
+	a := &Map{List: &List{}}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// KeysSorted checks the datatype that was used to construct this array and
+// returns the KeysSorted boolean value used to denote if the key array is
+// sorted for each list element.
+//
+// Important note: Nothing is enforced regarding the KeysSorted value; it is
+// solely a metadata field that should be set if keys within each value are sorted.
+// This value is not used at all in regards to comparisons / equality.
+func (a *Map) KeysSorted() bool { return a.DataType().(*arrow.MapType).KeysSorted }
+
+// validateData panics unless data has the physical layout required of a map:
+// exactly one non-null struct child with two fields, where neither the
+// struct entries nor the keys contain nulls.
+func (a *Map) validateData(data *Data) {
+	if len(data.childData) != 1 || data.childData[0] == nil {
+		panic("arrow/array: expected one child array for map array")
+	}
+
+	if data.childData[0].DataType().ID() != arrow.STRUCT {
+		panic("arrow/array: map array child should be struct type")
+	}
+
+	if data.childData[0].NullN() != 0 {
+		panic("arrow/array: map array child array should have no nulls")
+	}
+
+	if len(data.childData[0].Children()) != 2 {
+		panic("arrow/array: map array child array should have two fields")
+	}
+
+	if data.childData[0].Children()[0].NullN() != 0 {
+		panic("arrow/array: map array keys array should have no nulls")
+	}
+}
+
+// setData validates data and installs it, materializing the keys and items
+// arrays from the two fields of the child struct.
+func (a *Map) setData(data *Data) {
+	a.validateData(data)
+
+	a.List.setData(data)
+	a.keys = MakeFromData(data.childData[0].Children()[0])
+	a.items = MakeFromData(data.childData[0].Children()[1])
+}
+
+// Keys returns the full Array of Key values, equivalent to grabbing
+// the key field (the first field) of the child struct.
+func (a *Map) Keys() arrow.Array { return a.keys }
+
+// Items returns the full Array of Item values, equivalent to grabbing
+// the Value field (the second field) of the child struct.
+func (a *Map) Items() arrow.Array { return a.items }
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+// The keys/items views hold their own references alongside the list.
+func (a *Map) Retain() {
+	a.List.Retain()
+	a.keys.Retain()
+	a.items.Retain()
+}
+
+// Release decreases the reference count by 1.
+// Release may be called simultaneously from multiple goroutines.
+// When the reference count goes to zero, the memory is freed.
+func (a *Map) Release() {
+	a.List.Release()
+	a.keys.Release()
+	a.items.Release()
+}
+
+// arrayEqualMap reports whether two Map arrays are equal.
+func arrayEqualMap(left, right *Map) bool {
+	// since Map is implemented using a list, we can just use arrayEqualList
+	return arrayEqualList(left.List, right.List)
+}
+
+// MapBuilder builds Map arrays. It wraps a ListBuilder over a struct of
+// (key, item) and exposes separate key/item builders for convenience.
+type MapBuilder struct {
+	listBuilder *ListBuilder
+
+	etype                   *arrow.MapType
+	keytype, itemtype       arrow.DataType
+	keyBuilder, itemBuilder Builder
+	keysSorted              bool
+}
+
+// NewMapBuilder returns a builder, using the provided memory allocator.
+// The created Map builder will create a map array whose keys will be a non-nullable
+// array of type `keytype` and whose mapped items will be a nullable array of itemtype.
+//
+// KeysSorted is not enforced at all by the builder, it should only be set to true
+// building using keys in sorted order for each value. The KeysSorted value will just be
+// used when creating the DataType for the map.
+//
+// # Example
+//
+// Simple example provided of converting a []map[string]int32 to an array.Map
+// by using a MapBuilder:
+//
+//	/* assume maplist == []map[string]int32 */
+//	bldr := array.NewMapBuilder(memory.DefaultAllocator, arrow.BinaryTypes.String, arrow.PrimitiveTypes.Int32, false)
+//	defer bldr.Release()
+//	kb := bldr.KeyBuilder().(*array.StringBuilder)
+//	ib := bldr.ItemBuilder().(*array.Int32Builder)
+//	for _, m := range maplist {
+//	    bldr.Append(true)
+//	    for k, v := range m {
+//	        kb.Append(k)
+//	        ib.Append(v)
+//	    }
+//	}
+//	maparr := bldr.NewMapArray()
+//	defer maparr.Release()
+func NewMapBuilder(mem memory.Allocator, keytype, itemtype arrow.DataType, keysSorted bool) *MapBuilder {
+	etype := arrow.MapOf(keytype, itemtype)
+	etype.KeysSorted = keysSorted
+	listBldr := NewListBuilder(mem, etype.Elem())
+	// Grab dedicated references to the struct's field builders so keys and
+	// items can be appended independently of the list builder.
+	keyBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(0)
+	keyBldr.Retain()
+	itemBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(1)
+	itemBldr.Retain()
+	return &MapBuilder{
+		listBuilder: listBldr,
+		keyBuilder:  keyBldr,
+		itemBuilder: itemBldr,
+		etype:       etype,
+		keytype:     keytype,
+		itemtype:    itemtype,
+		keysSorted:  keysSorted,
+	}
+}
+
+// NewMapBuilderWithType returns a builder for the given pre-constructed
+// MapType, using the provided memory allocator. Equivalent to NewMapBuilder
+// but reuses dt (including its KeysSorted flag) instead of constructing a
+// new MapType.
+func NewMapBuilderWithType(mem memory.Allocator, dt *arrow.MapType) *MapBuilder {
+	listBldr := NewListBuilder(mem, dt.Elem())
+	// Grab dedicated references to the struct's field builders so keys and
+	// items can be appended independently of the list builder.
+	keyBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(0)
+	keyBldr.Retain()
+	itemBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(1)
+	itemBldr.Retain()
+	return &MapBuilder{
+		listBuilder: listBldr,
+		keyBuilder:  keyBldr,
+		itemBuilder: itemBldr,
+		etype:       dt,
+		keytype:     dt.KeyType(),
+		itemtype:    dt.ItemType(),
+		keysSorted:  dt.KeysSorted,
+	}
+}
+
+// Type returns the MapType this builder produces.
+func (b *MapBuilder) Type() arrow.DataType { return b.etype }
+
+// Retain increases the reference count by 1 for the sub-builders (list, key, item).
+// Retain may be called simultaneously from multiple goroutines.
+func (b *MapBuilder) Retain() {
+	b.listBuilder.Retain()
+	b.keyBuilder.Retain()
+	b.itemBuilder.Retain()
+}
+
+// Release decreases the reference count by 1 for the sub builders (list, key, item).
+func (b *MapBuilder) Release() {
+	b.listBuilder.Release()
+	b.keyBuilder.Release()
+	b.itemBuilder.Release()
+}
+
+// Len returns the current number of Maps that are in the builder.
+func (b *MapBuilder) Len() int { return b.listBuilder.Len() }
+
+// Cap returns the total number of elements that can be stored
+// without allocating additional memory.
+func (b *MapBuilder) Cap() int { return b.listBuilder.Cap() }
+
+// NullN returns the number of null values in the array builder.
+func (b *MapBuilder) NullN() int { return b.listBuilder.NullN() }
+
+// IsNull returns if a previously appended value at a given index is null or not.
+func (b *MapBuilder) IsNull(i int) bool {
+	return b.listBuilder.IsNull(i)
+}
+
+// Append adds a new Map element to the array, calling Append(false) is
+// equivalent to calling AppendNull. Any keys already written since the last
+// map entry are first committed to the child struct builder.
+func (b *MapBuilder) Append(v bool) {
+	b.adjustStructBuilderLen()
+	b.listBuilder.Append(v)
+}
+
+// AppendWithSize appends a map entry, ignoring the size hint; it exists to
+// satisfy VarLenListLikeBuilder (maps are backed by a plain list).
+func (b *MapBuilder) AppendWithSize(v bool, _ int) {
+	b.Append(v)
+}
+
+// AppendNull adds a null map entry to the array.
+func (b *MapBuilder) AppendNull() {
+	b.Append(false)
+}
+
+// AppendNulls adds n null map entries to the array.
+func (b *MapBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+// SetNull marks the map entry at index i as null.
+func (b *MapBuilder) SetNull(i int) {
+	b.listBuilder.SetNull(i)
+}
+
+// AppendEmptyValue appends a valid, empty map entry.
+func (b *MapBuilder) AppendEmptyValue() {
+	b.Append(true)
+}
+
+// AppendEmptyValues appends n valid, empty map entries.
+func (b *MapBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// Reserve ensures there is enough space for appending n maps.
+func (b *MapBuilder) Reserve(n int) { b.listBuilder.Reserve(n) }
+
+// Resize adjusts the space allocated by b to n map elements. If n is greater than
+// b.Cap(), additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *MapBuilder) Resize(n int) { b.listBuilder.Resize(n) }
+
+// AppendValues is for bulk appending a group of elements with offsets provided
+// and validity booleans provided. Pending keys/items are committed to the
+// child struct builder first.
+func (b *MapBuilder) AppendValues(offsets []int32, valid []bool) {
+	b.adjustStructBuilderLen()
+	b.listBuilder.AppendValues(offsets, valid)
+}
+
+// UnsafeAppendBoolToBitmap delegates a raw validity-bit append to the
+// underlying list builder; callers must have reserved space.
+func (b *MapBuilder) UnsafeAppendBoolToBitmap(v bool) {
+	b.listBuilder.UnsafeAppendBoolToBitmap(v)
+}
+
+// init and resize delegate storage management to the underlying list builder.
+func (b *MapBuilder) init(capacity int)                  { b.listBuilder.init(capacity) }
+func (b *MapBuilder) resize(newBits int, init func(int)) { b.listBuilder.resize(newBits, init) }
+
+// adjustStructBuilderLen pads the child struct builder with valid entries so
+// its length catches up with the key builder. Users append keys/items
+// directly to the field builders, leaving the struct builder behind until a
+// new map entry is started.
+func (b *MapBuilder) adjustStructBuilderLen() {
+	sb := b.listBuilder.ValueBuilder().(*StructBuilder)
+	if sb.Len() < b.keyBuilder.Len() {
+		valids := make([]bool, b.keyBuilder.Len()-sb.Len())
+		for i := range valids {
+			valids[i] = true
+		}
+		sb.AppendValues(valids)
+	}
+}
+
+// NewArray creates a new Map array from the memory buffers used by the builder, and
+// resets the builder so it can be used again to build a new Map array.
+// It delegates to NewMapArray and satisfies the Builder interface.
+func (b *MapBuilder) NewArray() arrow.Array {
+	return b.NewMapArray()
+}
+
+// NewMapArray creates a new Map array from the memory buffers used by the builder, and
+// resets the builder so it can be used again to build a new Map array.
+// Panics if nulls were appended to a non-nullable item field.
+func (b *MapBuilder) NewMapArray() (a *Map) {
+	if !b.etype.ItemField().Nullable && b.ItemBuilder().NullN() > 0 {
+		panic("arrow/array: item not nullable")
+	}
+
+	data := b.newData()
+	defer data.Release()
+	a = NewMapData(data)
+	return
+}
+
+// newData finalizes the underlying list builder and rewraps its buffers and
+// child data under the map datatype. The caller owns the returned Data.
+func (b *MapBuilder) newData() (data *Data) {
+	b.adjustStructBuilderLen()
+	values := b.listBuilder.NewListArray()
+	defer values.Release()
+
+	data = NewData(b.etype,
+		values.Len(), values.data.buffers,
+		values.data.childData, values.NullN(), 0)
+	return
+}
+
+// KeyBuilder returns a builder that can be used to populate the keys of the maps.
+func (b *MapBuilder) KeyBuilder() Builder { return b.keyBuilder }
+
+// ItemBuilder returns a builder that can be used to populate the values that the
+// keys point to.
+func (b *MapBuilder) ItemBuilder() Builder { return b.itemBuilder }
+
+// ValueBuilder can be used instead of separately using the Key/Item builders
+// to build the list as a List of Structs rather than building the keys/items
+// separately.
+func (b *MapBuilder) ValueBuilder() Builder {
+	return b.listBuilder.ValueBuilder()
+}
+
+// AppendValueFromString appends one map entry parsed from its string form,
+// delegating to the underlying list builder.
+func (b *MapBuilder) AppendValueFromString(s string) error {
+	return b.listBuilder.AppendValueFromString(s)
+}
+
+// UnmarshalOne decodes a single JSON value into one map entry.
+func (b *MapBuilder) UnmarshalOne(dec *json.Decoder) error {
+	return b.listBuilder.UnmarshalOne(dec)
+}
+
+// Unmarshal decodes JSON values from dec until the current array ends.
+func (b *MapBuilder) Unmarshal(dec *json.Decoder) error {
+	return b.listBuilder.Unmarshal(dec)
+}
+
+// UnmarshalJSON implements json.Unmarshaler: data must be a JSON array of
+// map entries, each appended via the underlying list builder.
+func (b *MapBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("map builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// Compile-time interface-satisfaction checks for Map and MapBuilder.
+var (
+	_ arrow.Array     = (*Map)(nil)
+	_ Builder         = (*MapBuilder)(nil)
+	_ ListLikeBuilder = (*MapBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go
new file mode 100644
index 000000000..150a1030e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go
@@ -0,0 +1,218 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Null represents an immutable, degenerate array with no physical storage.
+// Every element is null; only the logical length is tracked.
+type Null struct {
+	array
+}
+
+// NewNull returns a new Null array value of size n.
+func NewNull(n int) *Null {
+	a := &Null{}
+	a.refCount = 1
+	data := NewData(
+		arrow.Null, n,
+		// A single nil buffer: null arrays carry no validity bitmap or values.
+		[]*memory.Buffer{nil},
+		nil,
+		n, // every element is null
+		0,
+	)
+	a.setData(data)
+	data.Release()
+	return a
+}
+
+// NewNullData returns a new Null array value, from data.
+func NewNullData(data arrow.ArrayData) *Null {
+	a := &Null{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// ValueStr always returns the null sentinel string; every element is null.
+func (a *Null) ValueStr(int) string { return NullValueStr }
+
+// Value always returns nil; every element is null.
+func (a *Null) Value(int) interface{} { return nil }
+
+// String renders the array as a bracketed, space-separated list of null
+// markers, e.g. "[]" for an empty array.
+func (a *Null) String() string {
+	parts := make([]string, a.Len())
+	for i := range parts {
+		parts[i] = NullValueStr
+	}
+	return "[" + strings.Join(parts, " ") + "]"
+}
+
+// setData installs data and normalizes it for the null type: no validity
+// bitmap, and the null count forced equal to the length.
+func (a *Null) setData(data *Data) {
+	a.array.setData(data)
+	a.array.nullBitmapBytes = nil
+	a.array.data.nulls = a.array.data.length
+}
+
+// GetOneForMarshal returns the JSON-marshalable form of element i (always nil).
+func (a *Null) GetOneForMarshal(i int) interface{} {
+	return nil
+}
+
+// MarshalJSON encodes the array as a JSON array of a.Len() nulls.
+func (a *Null) MarshalJSON() ([]byte, error) {
+	return json.Marshal(make([]interface{}, a.Len()))
+}
+
+// NullBuilder builds Null arrays; it only counts appended elements and
+// allocates no value storage.
+type NullBuilder struct {
+	builder
+}
+
+// NewNullBuilder returns a builder, using the provided memory allocator.
+func NewNullBuilder(mem memory.Allocator) *NullBuilder {
+	return &NullBuilder{builder: builder{refCount: 1, mem: mem}}
+}
+
+// Type returns the null datatype this builder produces.
+func (b *NullBuilder) Type() arrow.DataType { return arrow.Null }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *NullBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		// A null builder normally has no bitmap, but free one defensively.
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+	}
+}
+
+// AppendNull appends one null element; only the counters change since the
+// null type has no physical storage.
+func (b *NullBuilder) AppendNull() {
+	b.builder.length++
+	b.builder.nulls++
+}
+
+// AppendNulls appends n null elements.
+func (b *NullBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+// AppendValueFromString appends a value from its string form; only the null
+// sentinel is accepted, anything else is an error.
+func (b *NullBuilder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	return fmt.Errorf("cannot convert %q to null", s)
+}
+
+// AppendEmptyValue / AppendEmptyValues: the only value a null array can hold
+// is null, so "empty" values are nulls.
+func (b *NullBuilder) AppendEmptyValue() { b.AppendNull() }
+
+func (b *NullBuilder) AppendEmptyValues(n int) { b.AppendNulls(n) }
+
+// Reserve, Resize, init, and resize are no-ops: a null array allocates nothing.
+func (*NullBuilder) Reserve(size int) {}
+func (*NullBuilder) Resize(size int)  {}
+
+func (*NullBuilder) init(cap int)                      {}
+func (*NullBuilder) resize(newBits int, init func(int)) {}
+
+// NewArray creates a Null array from the memory buffers used by the builder and resets the NullBuilder
+// so it can be used to build a new array. It delegates to NewNullArray.
+func (b *NullBuilder) NewArray() arrow.Array {
+	return b.NewNullArray()
+}
+
+// NewNullArray creates a Null array from the memory buffers used by the builder and resets the NullBuilder
+// so it can be used to build a new array.
+func (b *NullBuilder) NewNullArray() (a *Null) {
+	data := b.newData()
+	a = NewNullData(data)
+	// The array holds its own reference; drop the builder's.
+	data.Release()
+	return
+}
+
+// newData packages the builder's counters into a Data with no buffers or
+// children, then resets the builder for reuse.
+func (b *NullBuilder) newData() (data *Data) {
+	data = NewData(
+		arrow.Null, b.length,
+		[]*memory.Buffer{nil},
+		nil,
+		b.nulls,
+		0,
+	)
+	b.reset()
+
+	return
+}
+
+// UnmarshalOne decodes a single JSON value; only JSON null is accepted.
+func (b *NullBuilder) UnmarshalOne(dec *json.Decoder) error {
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	switch t.(type) {
+	case nil:
+		b.AppendNull()
+	default:
+		return &json.UnmarshalTypeError{
+			Value: fmt.Sprint(t),
+			// NOTE(review): reflect.TypeOf(nil) yields a nil reflect.Type, so
+			// the error's Type field is nil here — confirm this is intended.
+			Type:   reflect.TypeOf(nil),
+			Offset: dec.InputOffset(),
+		}
+	}
+	return nil
+}
+
+// Unmarshal decodes and appends JSON nulls from dec until the current
+// JSON array is exhausted.
+func (b *NullBuilder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler: data must be a JSON array of nulls.
+func (b *NullBuilder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		return fmt.Errorf("null builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// Compile-time interface-satisfaction checks for Null and NullBuilder.
+var (
+	_ arrow.Array = (*Null)(nil)
+	_ Builder     = (*NullBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go
new file mode 100644
index 000000000..a3e110151
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go
@@ -0,0 +1,1430 @@
+// Code generated by array/numeric.gen.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// A type which represents an immutable sequence of int64 values.
+type Int64 struct {
+ array
+ values []int64
+}
+
+// NewInt64Data creates a new Int64.
+func NewInt64Data(data arrow.ArrayData) *Int64 {
+ a := &Int64{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Int64) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Int64) Value(i int) int64 { return a.values[i] }
+
+// Values returns the values.
+func (a *Int64) Int64Values() []int64 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Int64) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Int64) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Int64Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Int64) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatInt(int64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Int64) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Int64) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualInt64 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualInt64(left, right *Int64) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of uint64 values.
+type Uint64 struct {
+ array
+ values []uint64
+}
+
+// NewUint64Data creates a new Uint64.
+func NewUint64Data(data arrow.ArrayData) *Uint64 {
+ a := &Uint64{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Uint64) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Uint64) Value(i int) uint64 { return a.values[i] }
+
+// Values returns the values.
+func (a *Uint64) Uint64Values() []uint64 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Uint64) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Uint64) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Uint64Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Uint64) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatUint(uint64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Uint64) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Uint64) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualUint64 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualUint64(left, right *Uint64) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of float64 values.
+type Float64 struct {
+ array
+ values []float64
+}
+
+// NewFloat64Data creates a new Float64.
+func NewFloat64Data(data arrow.ArrayData) *Float64 {
+ a := &Float64{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Float64) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Float64) Value(i int) float64 { return a.values[i] }
+
+// Values returns the values.
+func (a *Float64) Float64Values() []float64 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Float64) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Float64) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Float64Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr formats slot i with the shortest 'g' representation, or returns
+// NullValueStr when null.
+func (a *Float64) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Float64) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Float64) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualFloat64 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualFloat64(left, right *Float64) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of int32 values.
+type Int32 struct {
+ array
+ values []int32
+}
+
+// NewInt32Data creates a new Int32.
+func NewInt32Data(data arrow.ArrayData) *Int32 {
+ a := &Int32{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Int32) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Int32) Value(i int) int32 { return a.values[i] }
+
+// Values returns the values.
+func (a *Int32) Int32Values() []int32 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Int32) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Int32) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Int32Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Int32) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatInt(int64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Int32) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Int32) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualInt32 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualInt32(left, right *Int32) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of uint32 values.
+type Uint32 struct {
+ array
+ values []uint32
+}
+
+// NewUint32Data creates a new Uint32.
+func NewUint32Data(data arrow.ArrayData) *Uint32 {
+ a := &Uint32{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Uint32) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Uint32) Value(i int) uint32 { return a.values[i] }
+
+// Values returns the values.
+func (a *Uint32) Uint32Values() []uint32 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Uint32) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Uint32) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Uint32Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Uint32) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatUint(uint64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Uint32) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Uint32) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualUint32 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualUint32(left, right *Uint32) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of float32 values.
+type Float32 struct {
+ array
+ values []float32
+}
+
+// NewFloat32Data creates a new Float32.
+func NewFloat32Data(data arrow.ArrayData) *Float32 {
+ a := &Float32{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Float32) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Float32) Value(i int) float32 { return a.values[i] }
+
+// Values returns the values.
+func (a *Float32) Float32Values() []float32 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Float32) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Float32) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Float32Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr formats slot i with the shortest 'g' representation (32-bit
+// precision), or returns NullValueStr when null.
+func (a *Float32) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Float32) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Float32) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualFloat32 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualFloat32(left, right *Float32) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of int16 values.
+type Int16 struct {
+ array
+ values []int16
+}
+
+// NewInt16Data creates a new Int16.
+func NewInt16Data(data arrow.ArrayData) *Int16 {
+ a := &Int16{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Int16) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Int16) Value(i int) int16 { return a.values[i] }
+
+// Values returns the values.
+func (a *Int16) Int16Values() []int16 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Int16) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Int16) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Int16Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Int16) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatInt(int64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Int16) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Int16) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualInt16 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualInt16(left, right *Int16) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of uint16 values.
+type Uint16 struct {
+ array
+ values []uint16
+}
+
+// NewUint16Data creates a new Uint16.
+func NewUint16Data(data arrow.ArrayData) *Uint16 {
+ a := &Uint16{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Uint16) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Uint16) Value(i int) uint16 { return a.values[i] }
+
+// Values returns the values.
+func (a *Uint16) Uint16Values() []uint16 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Uint16) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Uint16) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Uint16Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Uint16) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatUint(uint64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns the raw value for JSON encoding, or nil when null.
+func (a *Uint16) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return a.values[i]
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Uint16) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.values[i]
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualUint16 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualUint16(left, right *Uint16) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of int8 values.
+type Int8 struct {
+ array
+ values []int8
+}
+
+// NewInt8Data creates a new Int8.
+func NewInt8Data(data arrow.ArrayData) *Int8 {
+ a := &Int8{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Int8) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Int8) Value(i int) int8 { return a.values[i] }
+
+// Values returns the values.
+func (a *Int8) Int8Values() []int8 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Int8) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Int8) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Int8Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Int8) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatInt(int64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns slot i widened to float64 for JSON (nil when
+// null); the cast keeps encoding/json from treating byte-sized values as
+// binary data.
+func (a *Int8) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return float64(a.values[i]) // prevent uint8 from being seen as binary data
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Int8) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualInt8 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualInt8(left, right *Int8) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of uint8 values.
+type Uint8 struct {
+ array
+ values []uint8
+}
+
+// NewUint8Data creates a new Uint8.
+func NewUint8Data(data arrow.ArrayData) *Uint8 {
+ a := &Uint8{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Uint8) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Uint8) Value(i int) uint8 { return a.values[i] }
+
+// Values returns the values.
+func (a *Uint8) Uint8Values() []uint8 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Uint8) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Uint8) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Uint8Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the base-10 string for slot i, or NullValueStr when null.
+func (a *Uint8) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return strconv.FormatUint(uint64(a.Value(i)), 10)
+}
+
+// GetOneForMarshal returns slot i widened to float64 for JSON (nil when
+// null); encoding/json would otherwise base64-encode a []uint8.
+func (a *Uint8) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ return float64(a.values[i]) // prevent uint8 from being seen as binary data
+}
+
+// MarshalJSON encodes the array as a JSON list, with null for invalid slots.
+func (a *Uint8) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data
+ } else {
+ vals[i] = nil
+ }
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualUint8 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualUint8(left, right *Uint8) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of arrow.Time32 values.
+type Time32 struct {
+ array
+ values []arrow.Time32
+}
+
+// NewTime32Data creates a new Time32.
+func NewTime32Data(data arrow.ArrayData) *Time32 {
+ a := &Time32{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Time32) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Time32) Value(i int) arrow.Time32 { return a.values[i] }
+
+// Values returns the values.
+func (a *Time32) Time32Values() []arrow.Time32 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Time32) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Time32) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Time32Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr formats slot i using the type's time unit, or returns
+// NullValueStr when null.
+func (a *Time32) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return a.values[i].FormattedString(a.DataType().(*arrow.Time32Type).Unit)
+}
+
+// GetOneForMarshal renders slot i as a wall-clock "15:04:05.999999999"
+// string for JSON, or nil when null.
+func (a *Time32) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.values[i].ToTime(a.DataType().(*arrow.Time32Type).Unit).Format("15:04:05.999999999")
+}
+
+// MarshalJSON encodes the array as a JSON list of time strings, with null
+// for invalid slots.
+func (a *Time32) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualTime32 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualTime32(left, right *Time32) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of arrow.Time64 values.
+type Time64 struct {
+ array
+ values []arrow.Time64
+}
+
+// NewTime64Data creates a new Time64.
+func NewTime64Data(data arrow.ArrayData) *Time64 {
+ a := &Time64{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Time64) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Time64) Value(i int) arrow.Time64 { return a.values[i] }
+
+// Values returns the values.
+func (a *Time64) Time64Values() []arrow.Time64 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Time64) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Time64) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Time64Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr formats slot i using the type's time unit, or returns
+// NullValueStr when null.
+func (a *Time64) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return a.values[i].FormattedString(a.DataType().(*arrow.Time64Type).Unit)
+}
+
+// GetOneForMarshal renders slot i as a wall-clock "15:04:05.999999999"
+// string for JSON, or nil when null.
+func (a *Time64) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.values[i].ToTime(a.DataType().(*arrow.Time64Type).Unit).Format("15:04:05.999999999")
+}
+
+// MarshalJSON encodes the array as a JSON list of time strings, with null
+// for invalid slots.
+func (a *Time64) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualTime64 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualTime64(left, right *Time64) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of arrow.Date32 values.
+type Date32 struct {
+ array
+ values []arrow.Date32
+}
+
+// NewDate32Data creates a new Date32.
+func NewDate32Data(data arrow.ArrayData) *Date32 {
+ a := &Date32{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Date32) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Date32) Value(i int) arrow.Date32 { return a.values[i] }
+
+// Values returns the values.
+func (a *Date32) Date32Values() []arrow.Date32 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Date32) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Date32) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Date32Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the formatted date for slot i, or NullValueStr when null.
+func (a *Date32) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return a.values[i].FormattedString()
+}
+
+// GetOneForMarshal renders slot i as a "2006-01-02" date string for JSON,
+// or nil when null.
+func (a *Date32) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.values[i].ToTime().Format("2006-01-02")
+}
+
+// MarshalJSON encodes the array as a JSON list of date strings, with null
+// for invalid slots.
+func (a *Date32) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualDate32 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualDate32(left, right *Date32) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of arrow.Date64 values.
+type Date64 struct {
+ array
+ values []arrow.Date64
+}
+
+// NewDate64Data creates a new Date64.
+func NewDate64Data(data arrow.ArrayData) *Date64 {
+ a := &Date64{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Date64) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Date64) Value(i int) arrow.Date64 { return a.values[i] }
+
+// Values returns the values.
+func (a *Date64) Date64Values() []arrow.Date64 { return a.values }
+
+// String returns a string representation of the array.
+func (a *Date64) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Date64) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.Date64Traits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr returns the formatted date for slot i, or NullValueStr when null.
+func (a *Date64) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ return a.values[i].FormattedString()
+}
+
+// GetOneForMarshal renders slot i as a "2006-01-02" date string for JSON,
+// or nil when null.
+func (a *Date64) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.values[i].ToTime().Format("2006-01-02")
+}
+
+// MarshalJSON encodes the array as a JSON list of date strings, with null
+// for invalid slots.
+func (a *Date64) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualDate64 compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualDate64(left, right *Date64) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// A type which represents an immutable sequence of arrow.Duration values.
+type Duration struct {
+ array
+ values []arrow.Duration
+}
+
+// NewDurationData creates a new Duration.
+func NewDurationData(data arrow.ArrayData) *Duration {
+ a := &Duration{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Duration) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Duration) Value(i int) arrow.Duration { return a.values[i] }
+
+// Values returns the values.
+func (a *Duration) DurationValues() []arrow.Duration { return a.values }
+
+// String returns a string representation of the array.
+func (a *Duration) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
+// setData caches the typed value slice from buffer 1, windowed by the
+// array data's offset and length.
+func (a *Duration) setData(data *Data) {
+ a.array.setData(data)
+ vals := data.buffers[1]
+ if vals != nil {
+ a.values = arrow.DurationTraits.CastFromBytes(vals.Bytes())
+ beg := a.array.data.offset
+ end := beg + a.array.data.length
+ a.values = a.values[beg:end]
+ }
+}
+
+// ValueStr renders slot i as value-plus-unit, or NullValueStr when null.
+func (a *Duration) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+ // return value and suffix as a string such as "12345ms"
+ return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit)
+}
+
+// GetOneForMarshal renders slot i as a value-plus-unit string for JSON,
+// or nil when null.
+func (a *Duration) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ // return value and suffix as a string such as "12345ms"
+ return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit.String())
+}
+
+// MarshalJSON encodes the array as a JSON list of duration strings, with
+// null for invalid slots.
+func (a *Duration) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+
+ return json.Marshal(vals)
+}
+
+// arrayEqualDuration compares non-null slots for equality; equal lengths and
+// matching validity bitmaps are expected to have been checked by the caller.
+func arrayEqualDuration(left, right *Duration) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl
new file mode 100644
index 000000000..34d17fbfc
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/apache/arrow/go/v14/arrow"
+	"github.com/apache/arrow/go/v14/internal/json"
+)
+
+{{/* NOTE(review): several branches below emit strconv.* calls although strconv
+is not in the import list above — presumably the generator post-processes the
+output with goimports; confirm before editing this template. The template is
+expanded once per entry of .In, producing one array type per numeric type. */}}
+{{range .In}}
+
+// A type which represents an immutable sequence of {{or .QualifiedType .Type}} values.
+type {{.Name}} struct {
+	array
+	values []{{or .QualifiedType .Type}}
+}
+
+// New{{.Name}}Data creates a new {{.Name}}.
+func New{{.Name}}Data(data arrow.ArrayData) *{{.Name}} {
+	a := &{{.Name}}{}
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+// Reset resets the array for re-use.
+func (a *{{.Name}}) Reset(data *Data) {
+	a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *{{.Name}}) Value(i int) {{or .QualifiedType .Type}} { return a.values[i] }
+
+// Values returns the values.
+func (a *{{.Name}}) {{.Name}}Values() []{{or .QualifiedType .Type}} { return a.values }
+
+// String returns a string representation of the array.
+func (a *{{.Name}}) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i, v := range a.values {
+		if i > 0 {
+			fmt.Fprintf(o, " ")
+		}
+		switch {
+		case a.IsNull(i):
+			o.WriteString(NullValueStr)
+		default:
+			fmt.Fprintf(o, "%v", v)
+		}
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+{{/* setData re-slices the raw value buffer to the array's offset/length window. */}}
+func (a *{{.Name}}) setData(data *Data) {
+	a.array.setData(data)
+	vals := data.buffers[1]
+	if vals != nil {
+		a.values = arrow.{{.Name}}Traits.CastFromBytes(vals.Bytes())
+		beg := a.array.data.offset
+		end := beg + a.array.data.length
+		a.values = a.values[beg:end]
+	}
+}
+
+{{/* ValueStr: per-type string form; date/time types use their own formatters,
+integer/float types use strconv, everything else falls back to fmt. */}}
+func (a *{{.Name}}) ValueStr(i int) string {
+	if a.IsNull(i) {
+		return NullValueStr
+	}
+{{if or (eq .Name "Date32") (eq .Name "Date64") -}}
+	return a.values[i].FormattedString()
+{{else if or (eq .Name "Time32") (eq .Name "Time64") -}}
+	return a.values[i].FormattedString(a.DataType().(*{{.QualifiedType}}Type).Unit)
+{{else if (eq .Name "Duration") -}}
+	// return value and suffix as a string such as "12345ms"
+	return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit)
+{{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}}
+	return strconv.FormatInt(int64(a.Value(i)), 10)
+{{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}}
+	return strconv.FormatUint(uint64(a.Value(i)), 10)
+{{else if or (eq .Name "Float32") -}}
+	return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32)
+{{else if or (eq .Name "Float64") -}}
+	return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64)
+{{else}}
+	return fmt.Sprintf("%v", a.values[i])
+{{end -}}
+}
+
+{{/* GetOneForMarshal returns the JSON-marshalable form of element i (nil for nulls). */}}
+func (a *{{.Name}}) GetOneForMarshal(i int) interface{} {
+	if a.IsNull(i) {
+		return nil
+	}
+{{if or (eq .Name "Date32") (eq .Name "Date64") -}}
+	return a.values[i].ToTime().Format("2006-01-02")
+{{else if or (eq .Name "Time32") (eq .Name "Time64") -}}
+	return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("15:04:05.999999999")
+{{else if (eq .Name "Duration") -}}
+	// return value and suffix as a string such as "12345ms"
+	return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit.String())
+{{else if (eq .Size "1")}}
+	return float64(a.values[i]) // prevent uint8 from being seen as binary data
+{{else}}
+	return a.values[i]
+{{end -}}
+}
+
+func (a *{{.Name}}) MarshalJSON() ([]byte, error) {
+{{if .QualifiedType -}}
+	vals := make([]interface{}, a.Len())
+	for i := range a.values {
+		vals[i] = a.GetOneForMarshal(i)
+	}
+{{else -}}
+	vals := make([]interface{}, a.Len())
+	for i := 0; i < a.Len(); i++ {
+		if a.IsValid(i) {
+			{{ if (eq .Size "1") }}vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data{{ else }}vals[i] = a.values[i]{{ end }}
+		} else {
+			vals[i] = nil
+		}
+	}
+{{end}}
+	return json.Marshal(vals)
+}
+
+{{/* Element-wise equality of non-null slots; validity is presumably compared
+by the caller — TODO confirm against the array equality helpers. */}}
+func arrayEqual{{.Name}}(left, right *{{.Name}}) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		if left.Value(i) != right.Value(i) {
+			return false
+		}
+	}
+	return true
+}
+
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go
new file mode 100644
index 000000000..7f01180f5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go
@@ -0,0 +1,3664 @@
+// Code generated by array/numericbuilder.gen.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Int64Builder incrementally builds Arrow int64 arrays, backed by a resizable
+// value buffer plus the embedded builder's validity bitmap.
+type Int64Builder struct {
+	builder
+
+	data    *memory.Buffer
+	rawData []int64
+}
+
+func NewInt64Builder(mem memory.Allocator) *Int64Builder {
+	return &Int64Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Int64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int64 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Int64Builder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *Int64Builder) Append(v int64) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *Int64Builder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Int64Builder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+func (b *Int64Builder) AppendEmptyValue() {
+	b.Append(0)
+}
+
+func (b *Int64Builder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend writes v at the current length with no capacity check;
+// the caller must have reserved space (see Reserve).
+func (b *Int64Builder) UnsafeAppend(v int64) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records only validity (no value is written);
+// like UnsafeAppend it assumes space was already reserved.
+func (b *Int64Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Int64Builder) AppendValues(v []int64, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.Int64Traits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Int64Builder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.Int64Traits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.Int64Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Int64Builder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Int64Builder) Resize(n int) {
+	// keep the caller-requested size for the embedded builder; n itself may be
+	// raised to the minimum builder capacity for the data buffer below.
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.Int64Traits.BytesRequired(n))
+		b.rawData = arrow.Int64Traits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+func (b *Int64Builder) Value(i int) int64 {
+	return b.rawData[i]
+}
+
+// NewArray creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder
+// so it can be used to build a new array.
+func (b *Int64Builder) NewArray() arrow.Array {
+	return b.NewInt64Array()
+}
+
+// NewInt64Array creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder
+// so it can be used to build a new array.
+func (b *Int64Builder) NewInt64Array() (a *Int64) {
+	data := b.newData()
+	a = NewInt64Data(data)
+	data.Release()
+	return
+}
+
+// newData hands the builder's buffers off to a freshly built Data (trimming the
+// value buffer down to the built length) and resets the builder for re-use.
+func (b *Int64Builder) newData() (data *Data) {
+	bytesRequired := arrow.Int64Traits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.PrimitiveTypes.Int64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString parses s as a decimal int64 and appends it;
+// NullValueStr appends a null. On parse failure a null is appended and the
+// parse error is returned.
+func (b *Int64Builder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	// bitSize 8*8 = 64
+	v, err := strconv.ParseInt(s, 10, 8*8)
+	if err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(int64(v))
+	return nil
+}
+
+// UnmarshalOne decodes a single JSON token — null, a quoted decimal string,
+// or a number — and appends it (null appends a null slot).
+func (b *Int64Builder) UnmarshalOne(dec *json.Decoder) error {
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	switch v := t.(type) {
+	case nil:
+		b.AppendNull()
+
+	case string:
+		f, err := strconv.ParseInt(v, 10, 8*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v,
+				Type:   reflect.TypeOf(int64(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(int64(f))
+	case float64:
+		b.Append(int64(v))
+	case json.Number:
+		f, err := strconv.ParseInt(v.String(), 10, 8*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v.String(),
+				Type:   reflect.TypeOf(int64(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(int64(f))
+
+	default:
+		return &json.UnmarshalTypeError{
+			Value:  fmt.Sprint(t),
+			Type:   reflect.TypeOf(int64(0)),
+			Offset: dec.InputOffset(),
+		}
+	}
+
+	return nil
+}
+
+// Unmarshal appends one element per remaining value in the decoder's stream.
+func (b *Int64Builder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON expects data to be a JSON array of values.
+func (b *Int64Builder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		// NOTE(review): "binary builder" appears copy-pasted from the binary
+		// builder template; generated upstream code, left unchanged here.
+		return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// Uint64Builder incrementally builds Arrow uint64 arrays, backed by a resizable
+// value buffer plus the embedded builder's validity bitmap.
+type Uint64Builder struct {
+	builder
+
+	data    *memory.Buffer
+	rawData []uint64
+}
+
+func NewUint64Builder(mem memory.Allocator) *Uint64Builder {
+	return &Uint64Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Uint64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint64 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Uint64Builder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *Uint64Builder) Append(v uint64) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *Uint64Builder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Uint64Builder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+func (b *Uint64Builder) AppendEmptyValue() {
+	b.Append(0)
+}
+
+func (b *Uint64Builder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend writes v at the current length with no capacity check;
+// the caller must have reserved space (see Reserve).
+func (b *Uint64Builder) UnsafeAppend(v uint64) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records only validity (no value is written);
+// like UnsafeAppend it assumes space was already reserved.
+func (b *Uint64Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Uint64Builder) AppendValues(v []uint64, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.Uint64Traits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Uint64Builder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.Uint64Traits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.Uint64Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Uint64Builder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Uint64Builder) Resize(n int) {
+	// keep the caller-requested size for the embedded builder; n itself may be
+	// raised to the minimum builder capacity for the data buffer below.
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.Uint64Traits.BytesRequired(n))
+		b.rawData = arrow.Uint64Traits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+func (b *Uint64Builder) Value(i int) uint64 {
+	return b.rawData[i]
+}
+
+// NewArray creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder
+// so it can be used to build a new array.
+func (b *Uint64Builder) NewArray() arrow.Array {
+	return b.NewUint64Array()
+}
+
+// NewUint64Array creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder
+// so it can be used to build a new array.
+func (b *Uint64Builder) NewUint64Array() (a *Uint64) {
+	data := b.newData()
+	a = NewUint64Data(data)
+	data.Release()
+	return
+}
+
+// newData hands the builder's buffers off to a freshly built Data (trimming the
+// value buffer down to the built length) and resets the builder for re-use.
+func (b *Uint64Builder) newData() (data *Data) {
+	bytesRequired := arrow.Uint64Traits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.PrimitiveTypes.Uint64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString parses s as a decimal uint64 and appends it;
+// NullValueStr appends a null. On parse failure a null is appended and the
+// parse error is returned.
+func (b *Uint64Builder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	// bitSize 8*8 = 64
+	v, err := strconv.ParseUint(s, 10, 8*8)
+	if err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(uint64(v))
+	return nil
+}
+
+// UnmarshalOne decodes a single JSON token — null, a quoted decimal string,
+// or a number — and appends it (null appends a null slot).
+func (b *Uint64Builder) UnmarshalOne(dec *json.Decoder) error {
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	switch v := t.(type) {
+	case nil:
+		b.AppendNull()
+
+	case string:
+		f, err := strconv.ParseUint(v, 10, 8*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v,
+				Type:   reflect.TypeOf(uint64(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(uint64(f))
+	case float64:
+		b.Append(uint64(v))
+	case json.Number:
+		f, err := strconv.ParseUint(v.String(), 10, 8*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v.String(),
+				Type:   reflect.TypeOf(uint64(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(uint64(f))
+
+	default:
+		return &json.UnmarshalTypeError{
+			Value:  fmt.Sprint(t),
+			Type:   reflect.TypeOf(uint64(0)),
+			Offset: dec.InputOffset(),
+		}
+	}
+
+	return nil
+}
+
+// Unmarshal appends one element per remaining value in the decoder's stream.
+func (b *Uint64Builder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON expects data to be a JSON array of values.
+func (b *Uint64Builder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		// NOTE(review): "binary builder" appears copy-pasted from the binary
+		// builder template; generated upstream code, left unchanged here.
+		return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// Float64Builder incrementally builds Arrow float64 arrays, backed by a
+// resizable value buffer plus the embedded builder's validity bitmap.
+type Float64Builder struct {
+	builder
+
+	data    *memory.Buffer
+	rawData []float64
+}
+
+func NewFloat64Builder(mem memory.Allocator) *Float64Builder {
+	return &Float64Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Float64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Float64 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Float64Builder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *Float64Builder) Append(v float64) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *Float64Builder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Float64Builder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+func (b *Float64Builder) AppendEmptyValue() {
+	b.Append(0)
+}
+
+func (b *Float64Builder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend writes v at the current length with no capacity check;
+// the caller must have reserved space (see Reserve).
+func (b *Float64Builder) UnsafeAppend(v float64) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records only validity (no value is written);
+// like UnsafeAppend it assumes space was already reserved.
+func (b *Float64Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Float64Builder) AppendValues(v []float64, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.Float64Traits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Float64Builder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.Float64Traits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.Float64Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Float64Builder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Float64Builder) Resize(n int) {
+	// keep the caller-requested size for the embedded builder; n itself may be
+	// raised to the minimum builder capacity for the data buffer below.
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.Float64Traits.BytesRequired(n))
+		b.rawData = arrow.Float64Traits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+func (b *Float64Builder) Value(i int) float64 {
+	return b.rawData[i]
+}
+
+// NewArray creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder
+// so it can be used to build a new array.
+func (b *Float64Builder) NewArray() arrow.Array {
+	return b.NewFloat64Array()
+}
+
+// NewFloat64Array creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder
+// so it can be used to build a new array.
+func (b *Float64Builder) NewFloat64Array() (a *Float64) {
+	data := b.newData()
+	a = NewFloat64Data(data)
+	data.Release()
+	return
+}
+
+// newData hands the builder's buffers off to a freshly built Data (trimming the
+// value buffer down to the built length) and resets the builder for re-use.
+func (b *Float64Builder) newData() (data *Data) {
+	bytesRequired := arrow.Float64Traits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.PrimitiveTypes.Float64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString parses s as a float64 and appends it; NullValueStr
+// appends a null. On parse failure a null is appended and the parse error is
+// returned.
+func (b *Float64Builder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	// bitSize 8*8 = 64
+	v, err := strconv.ParseFloat(s, 8*8)
+	if err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(float64(v))
+	return nil
+}
+
+// UnmarshalOne decodes a single JSON token — null, a quoted number string,
+// or a number — and appends it (null appends a null slot).
+func (b *Float64Builder) UnmarshalOne(dec *json.Decoder) error {
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	switch v := t.(type) {
+	case nil:
+		b.AppendNull()
+
+	case string:
+		f, err := strconv.ParseFloat(v, 8*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v,
+				Type:   reflect.TypeOf(float64(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(float64(f))
+	case float64:
+		b.Append(float64(v))
+	case json.Number:
+		f, err := strconv.ParseFloat(v.String(), 8*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v.String(),
+				Type:   reflect.TypeOf(float64(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(float64(f))
+
+	default:
+		return &json.UnmarshalTypeError{
+			Value:  fmt.Sprint(t),
+			Type:   reflect.TypeOf(float64(0)),
+			Offset: dec.InputOffset(),
+		}
+	}
+
+	return nil
+}
+
+// Unmarshal appends one element per remaining value in the decoder's stream.
+func (b *Float64Builder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON expects data to be a JSON array of values.
+func (b *Float64Builder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		// NOTE(review): "binary builder" appears copy-pasted from the binary
+		// builder template; generated upstream code, left unchanged here.
+		return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+// Int32Builder incrementally builds Arrow int32 arrays, backed by a resizable
+// value buffer plus the embedded builder's validity bitmap.
+type Int32Builder struct {
+	builder
+
+	data    *memory.Buffer
+	rawData []int32
+}
+
+func NewInt32Builder(mem memory.Allocator) *Int32Builder {
+	return &Int32Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Int32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int32 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Int32Builder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		if b.data != nil {
+			b.data.Release()
+			b.data = nil
+			b.rawData = nil
+		}
+	}
+}
+
+func (b *Int32Builder) Append(v int32) {
+	b.Reserve(1)
+	b.UnsafeAppend(v)
+}
+
+func (b *Int32Builder) AppendNull() {
+	b.Reserve(1)
+	b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Int32Builder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+func (b *Int32Builder) AppendEmptyValue() {
+	b.Append(0)
+}
+
+func (b *Int32Builder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+// UnsafeAppend writes v at the current length with no capacity check;
+// the caller must have reserved space (see Reserve).
+func (b *Int32Builder) UnsafeAppend(v int32) {
+	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	b.rawData[b.length] = v
+	b.length++
+}
+
+// UnsafeAppendBoolToBitmap records only validity (no value is written);
+// like UnsafeAppend it assumes space was already reserved.
+func (b *Int32Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Int32Builder) AppendValues(v []int32, valid []bool) {
+	if len(v) != len(valid) && len(valid) != 0 {
+		panic("len(v) != len(valid) && len(valid) != 0")
+	}
+
+	if len(v) == 0 {
+		return
+	}
+
+	b.Reserve(len(v))
+	arrow.Int32Traits.Copy(b.rawData[b.length:], v)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Int32Builder) init(capacity int) {
+	b.builder.init(capacity)
+
+	b.data = memory.NewResizableBuffer(b.mem)
+	bytesN := arrow.Int32Traits.BytesRequired(capacity)
+	b.data.Resize(bytesN)
+	b.rawData = arrow.Int32Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Int32Builder) Reserve(n int) {
+	b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Int32Builder) Resize(n int) {
+	// keep the caller-requested size for the embedded builder; n itself may be
+	// raised to the minimum builder capacity for the data buffer below.
+	nBuilder := n
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(nBuilder, b.init)
+		b.data.Resize(arrow.Int32Traits.BytesRequired(n))
+		b.rawData = arrow.Int32Traits.CastFromBytes(b.data.Bytes())
+	}
+}
+
+func (b *Int32Builder) Value(i int) int32 {
+	return b.rawData[i]
+}
+
+// NewArray creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder
+// so it can be used to build a new array.
+func (b *Int32Builder) NewArray() arrow.Array {
+	return b.NewInt32Array()
+}
+
+// NewInt32Array creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder
+// so it can be used to build a new array.
+func (b *Int32Builder) NewInt32Array() (a *Int32) {
+	data := b.newData()
+	a = NewInt32Data(data)
+	data.Release()
+	return
+}
+
+// newData hands the builder's buffers off to a freshly built Data (trimming the
+// value buffer down to the built length) and resets the builder for re-use.
+func (b *Int32Builder) newData() (data *Data) {
+	bytesRequired := arrow.Int32Traits.BytesRequired(b.length)
+	if bytesRequired > 0 && bytesRequired < b.data.Len() {
+		// trim buffers
+		b.data.Resize(bytesRequired)
+	}
+	data = NewData(arrow.PrimitiveTypes.Int32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+	b.reset()
+
+	if b.data != nil {
+		b.data.Release()
+		b.data = nil
+		b.rawData = nil
+	}
+
+	return
+}
+
+// AppendValueFromString parses s as a decimal int32 and appends it;
+// NullValueStr appends a null. On parse failure a null is appended and the
+// parse error is returned.
+func (b *Int32Builder) AppendValueFromString(s string) error {
+	if s == NullValueStr {
+		b.AppendNull()
+		return nil
+	}
+	// bitSize 4*8 = 32
+	v, err := strconv.ParseInt(s, 10, 4*8)
+	if err != nil {
+		b.AppendNull()
+		return err
+	}
+	b.Append(int32(v))
+	return nil
+}
+
+// UnmarshalOne decodes a single JSON token — null, a quoted decimal string,
+// or a number — and appends it (null appends a null slot).
+func (b *Int32Builder) UnmarshalOne(dec *json.Decoder) error {
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	switch v := t.(type) {
+	case nil:
+		b.AppendNull()
+
+	case string:
+		f, err := strconv.ParseInt(v, 10, 4*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v,
+				Type:   reflect.TypeOf(int32(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(int32(f))
+	case float64:
+		b.Append(int32(v))
+	case json.Number:
+		f, err := strconv.ParseInt(v.String(), 10, 4*8)
+		if err != nil {
+			return &json.UnmarshalTypeError{
+				Value:  v.String(),
+				Type:   reflect.TypeOf(int32(0)),
+				Offset: dec.InputOffset(),
+			}
+		}
+		b.Append(int32(f))
+
+	default:
+		return &json.UnmarshalTypeError{
+			Value:  fmt.Sprint(t),
+			Type:   reflect.TypeOf(int32(0)),
+			Offset: dec.InputOffset(),
+		}
+	}
+
+	return nil
+}
+
+// Unmarshal appends one element per remaining value in the decoder's stream.
+func (b *Int32Builder) Unmarshal(dec *json.Decoder) error {
+	for dec.More() {
+		if err := b.UnmarshalOne(dec); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON expects data to be a JSON array of values.
+func (b *Int32Builder) UnmarshalJSON(data []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	t, err := dec.Token()
+	if err != nil {
+		return err
+	}
+
+	if delim, ok := t.(json.Delim); !ok || delim != '[' {
+		// NOTE(review): "binary builder" appears copy-pasted from the binary
+		// builder template; generated upstream code, left unchanged here.
+		return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+	}
+
+	return b.Unmarshal(dec)
+}
+
+type Uint32Builder struct {
+ builder
+
+ data *memory.Buffer
+ rawData []uint32
+}
+
+func NewUint32Builder(mem memory.Allocator) *Uint32Builder {
+ return &Uint32Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Uint32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint32 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Uint32Builder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Uint32Builder) Append(v uint32) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Uint32Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Uint32Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Uint32Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Uint32Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Uint32Builder) UnsafeAppend(v uint32) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Uint32Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Uint32Builder) AppendValues(v []uint32, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Uint32Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Uint32Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Uint32Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Uint32Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Uint32Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
func (b *Uint32Builder) Resize(n int) {
	// The bitmap is resized with the caller's n, but the value buffer is
	// sized with at least minBuilderCapacity elements.
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Uint32Traits.BytesRequired(n))
		b.rawData = arrow.Uint32Traits.CastFromBytes(b.data.Bytes())
	}
}

// Value returns the i-th appended value; it does not check validity.
func (b *Uint32Builder) Value(i int) uint32 {
	return b.rawData[i]
}

// NewArray creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder
// so it can be used to build a new array.
func (b *Uint32Builder) NewArray() arrow.Array {
	return b.NewUint32Array()
}

// NewUint32Array creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder
// so it can be used to build a new array.
func (b *Uint32Builder) NewUint32Array() (a *Uint32) {
	data := b.newData()
	a = NewUint32Data(data)
	data.Release()
	return
}

// newData snapshots the builder's buffers into a *Data (trimming the value
// buffer down to the bytes actually used) and resets the builder.
func (b *Uint32Builder) newData() (data *Data) {
	bytesRequired := arrow.Uint32Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.PrimitiveTypes.Uint32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString appends a value parsed from s in base 10, or a null
// when s equals NullValueStr. On a parse failure a null is appended and the
// parse error is returned.
func (b *Uint32Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseUint(s, 10, 4*8) // 4*8 = bit width of uint32
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(uint32(v))
	return nil
}

// UnmarshalOne decodes a single JSON token into the builder: null appends a
// null; numbers and base-10 numeric strings append a uint32. Any other token
// yields a *json.UnmarshalTypeError.
func (b *Uint32Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()

	case string:
		f, err := strconv.ParseUint(v, 10, 4*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(uint32(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(uint32(f))
	case float64:
		// NOTE(review): direct conversion; out-of-range or fractional JSON
		// numbers are not range-checked — confirm intended.
		b.Append(uint32(v))
	case json.Number:
		f, err := strconv.ParseUint(v.String(), 10, 4*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(uint32(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(uint32(f))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(uint32(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal decodes the remaining elements of a JSON array into the builder.
func (b *Uint32Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *Uint32Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// Float32Builder incrementally builds Float32 arrays: a validity bitmap
// (from the embedded builder) plus a resizable value buffer.
type Float32Builder struct {
	builder

	data    *memory.Buffer // value buffer
	rawData []float32      // typed view over data's bytes
}

// NewFloat32Builder returns a builder allocating from mem.
func NewFloat32Builder(mem memory.Allocator) *Float32Builder {
	return &Float32Builder{builder: builder{refCount: 1, mem: mem}}
}

// Type returns the data type produced by this builder.
func (b *Float32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Float32 }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Float32Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}

// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Float32Builder) Append(v float32) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Float32Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Float32Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Float32Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Float32Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Float32Builder) UnsafeAppend(v float32) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Float32Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Float32Builder) AppendValues(v []float32, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Float32Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Float32Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Float32Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Float32Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Float32Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
func (b *Float32Builder) Resize(n int) {
	// The bitmap is resized with the caller's n, but the value buffer is
	// sized with at least minBuilderCapacity elements.
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Float32Traits.BytesRequired(n))
		b.rawData = arrow.Float32Traits.CastFromBytes(b.data.Bytes())
	}
}

// Value returns the i-th appended value; it does not check validity.
func (b *Float32Builder) Value(i int) float32 {
	return b.rawData[i]
}

// NewArray creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder
// so it can be used to build a new array.
func (b *Float32Builder) NewArray() arrow.Array {
	return b.NewFloat32Array()
}

// NewFloat32Array creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder
// so it can be used to build a new array.
func (b *Float32Builder) NewFloat32Array() (a *Float32) {
	data := b.newData()
	a = NewFloat32Data(data)
	data.Release()
	return
}

// newData snapshots the builder's buffers into a *Data (trimming the value
// buffer down to the bytes actually used) and resets the builder.
func (b *Float32Builder) newData() (data *Data) {
	bytesRequired := arrow.Float32Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.PrimitiveTypes.Float32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString appends a value parsed from s, or a null when s
// equals NullValueStr. On a parse failure a null is appended and the parse
// error is returned.
func (b *Float32Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseFloat(s, 4*8) // 4*8 = bit width of float32
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(float32(v))
	return nil
}

// UnmarshalOne decodes a single JSON token into the builder: null appends a
// null; numbers and numeric strings append a float32. Any other token yields
// a *json.UnmarshalTypeError.
func (b *Float32Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()

	case string:
		f, err := strconv.ParseFloat(v, 4*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(float32(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(float32(f))
	case float64:
		// Narrowing conversion from the decoder's float64 representation.
		b.Append(float32(v))
	case json.Number:
		f, err := strconv.ParseFloat(v.String(), 4*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(float32(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(float32(f))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(float32(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal decodes the remaining elements of a JSON array into the builder.
func (b *Float32Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *Float32Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// Int16Builder incrementally builds Int16 arrays: a validity bitmap (from
// the embedded builder) plus a resizable value buffer.
type Int16Builder struct {
	builder

	data    *memory.Buffer // value buffer
	rawData []int16        // typed view over data's bytes
}

// NewInt16Builder returns a builder allocating from mem.
func NewInt16Builder(mem memory.Allocator) *Int16Builder {
	return &Int16Builder{builder: builder{refCount: 1, mem: mem}}
}

// Type returns the data type produced by this builder.
func (b *Int16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int16 }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Int16Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}

// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Int16Builder) Append(v int16) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Int16Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Int16Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Int16Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Int16Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Int16Builder) UnsafeAppend(v int16) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Int16Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Int16Builder) AppendValues(v []int16, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Int16Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Int16Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Int16Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Int16Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Int16Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
func (b *Int16Builder) Resize(n int) {
	// The bitmap is resized with the caller's n, but the value buffer is
	// sized with at least minBuilderCapacity elements.
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Int16Traits.BytesRequired(n))
		b.rawData = arrow.Int16Traits.CastFromBytes(b.data.Bytes())
	}
}

// Value returns the i-th appended value; it does not check validity.
func (b *Int16Builder) Value(i int) int16 {
	return b.rawData[i]
}

// NewArray creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder
// so it can be used to build a new array.
func (b *Int16Builder) NewArray() arrow.Array {
	return b.NewInt16Array()
}

// NewInt16Array creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder
// so it can be used to build a new array.
func (b *Int16Builder) NewInt16Array() (a *Int16) {
	data := b.newData()
	a = NewInt16Data(data)
	data.Release()
	return
}

// newData snapshots the builder's buffers into a *Data (trimming the value
// buffer down to the bytes actually used) and resets the builder.
func (b *Int16Builder) newData() (data *Data) {
	bytesRequired := arrow.Int16Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.PrimitiveTypes.Int16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString appends a value parsed from s in base 10, or a null
// when s equals NullValueStr. On a parse failure a null is appended and the
// parse error is returned.
func (b *Int16Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseInt(s, 10, 2*8) // 2*8 = bit width of int16
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(int16(v))
	return nil
}

// UnmarshalOne decodes a single JSON token into the builder: null appends a
// null; numbers and base-10 numeric strings append an int16. Any other token
// yields a *json.UnmarshalTypeError.
func (b *Int16Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()

	case string:
		f, err := strconv.ParseInt(v, 10, 2*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(int16(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(int16(f))
	case float64:
		// NOTE(review): direct conversion; out-of-range or fractional JSON
		// numbers are not range-checked — confirm intended.
		b.Append(int16(v))
	case json.Number:
		f, err := strconv.ParseInt(v.String(), 10, 2*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(int16(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(int16(f))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(int16(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal decodes the remaining elements of a JSON array into the builder.
func (b *Int16Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *Int16Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// Uint16Builder incrementally builds Uint16 arrays: a validity bitmap (from
// the embedded builder) plus a resizable value buffer.
type Uint16Builder struct {
	builder

	data    *memory.Buffer // value buffer
	rawData []uint16       // typed view over data's bytes
}

// NewUint16Builder returns a builder allocating from mem.
func NewUint16Builder(mem memory.Allocator) *Uint16Builder {
	return &Uint16Builder{builder: builder{refCount: 1, mem: mem}}
}

// Type returns the data type produced by this builder.
func (b *Uint16Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint16 }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Uint16Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}

// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Uint16Builder) Append(v uint16) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Uint16Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Uint16Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Uint16Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Uint16Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Uint16Builder) UnsafeAppend(v uint16) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Uint16Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Uint16Builder) AppendValues(v []uint16, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Uint16Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Uint16Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Uint16Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Uint16Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Uint16Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
func (b *Uint16Builder) Resize(n int) {
	// The bitmap is resized with the caller's n, but the value buffer is
	// sized with at least minBuilderCapacity elements.
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Uint16Traits.BytesRequired(n))
		b.rawData = arrow.Uint16Traits.CastFromBytes(b.data.Bytes())
	}
}

// Value returns the i-th appended value; it does not check validity.
func (b *Uint16Builder) Value(i int) uint16 {
	return b.rawData[i]
}

// NewArray creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder
// so it can be used to build a new array.
func (b *Uint16Builder) NewArray() arrow.Array {
	return b.NewUint16Array()
}

// NewUint16Array creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder
// so it can be used to build a new array.
func (b *Uint16Builder) NewUint16Array() (a *Uint16) {
	data := b.newData()
	a = NewUint16Data(data)
	data.Release()
	return
}

// newData snapshots the builder's buffers into a *Data (trimming the value
// buffer down to the bytes actually used) and resets the builder.
func (b *Uint16Builder) newData() (data *Data) {
	bytesRequired := arrow.Uint16Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.PrimitiveTypes.Uint16, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString appends a value parsed from s in base 10, or a null
// when s equals NullValueStr. On a parse failure a null is appended and the
// parse error is returned.
func (b *Uint16Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseUint(s, 10, 2*8) // 2*8 = bit width of uint16
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(uint16(v))
	return nil
}

// UnmarshalOne decodes a single JSON token into the builder: null appends a
// null; numbers and base-10 numeric strings append a uint16. Any other token
// yields a *json.UnmarshalTypeError.
func (b *Uint16Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()

	case string:
		f, err := strconv.ParseUint(v, 10, 2*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(uint16(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(uint16(f))
	case float64:
		// NOTE(review): direct conversion; out-of-range or fractional JSON
		// numbers are not range-checked — confirm intended.
		b.Append(uint16(v))
	case json.Number:
		f, err := strconv.ParseUint(v.String(), 10, 2*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(uint16(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(uint16(f))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(uint16(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal decodes the remaining elements of a JSON array into the builder.
func (b *Uint16Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *Uint16Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// Int8Builder incrementally builds Int8 arrays: a validity bitmap (from the
// embedded builder) plus a resizable value buffer.
type Int8Builder struct {
	builder

	data    *memory.Buffer // value buffer
	rawData []int8         // typed view over data's bytes
}

// NewInt8Builder returns a builder allocating from mem.
func NewInt8Builder(mem memory.Allocator) *Int8Builder {
	return &Int8Builder{builder: builder{refCount: 1, mem: mem}}
}

// Type returns the data type produced by this builder.
func (b *Int8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Int8 }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Int8Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}

// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Int8Builder) Append(v int8) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Int8Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Int8Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Int8Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Int8Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Int8Builder) UnsafeAppend(v int8) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Int8Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Int8Builder) AppendValues(v []int8, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Int8Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Int8Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Int8Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Int8Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Int8Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
func (b *Int8Builder) Resize(n int) {
	// The bitmap is resized with the caller's n, but the value buffer is
	// sized with at least minBuilderCapacity elements.
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Int8Traits.BytesRequired(n))
		b.rawData = arrow.Int8Traits.CastFromBytes(b.data.Bytes())
	}
}

// Value returns the i-th appended value; it does not check validity.
func (b *Int8Builder) Value(i int) int8 {
	return b.rawData[i]
}

// NewArray creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder
// so it can be used to build a new array.
func (b *Int8Builder) NewArray() arrow.Array {
	return b.NewInt8Array()
}

// NewInt8Array creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder
// so it can be used to build a new array.
func (b *Int8Builder) NewInt8Array() (a *Int8) {
	data := b.newData()
	a = NewInt8Data(data)
	data.Release()
	return
}

// newData snapshots the builder's buffers into a *Data (trimming the value
// buffer down to the bytes actually used) and resets the builder.
func (b *Int8Builder) newData() (data *Data) {
	bytesRequired := arrow.Int8Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.PrimitiveTypes.Int8, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString appends a value parsed from s in base 10, or a null
// when s equals NullValueStr. On a parse failure a null is appended and the
// parse error is returned.
func (b *Int8Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseInt(s, 10, 1*8) // 1*8 = bit width of int8
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(int8(v))
	return nil
}

// UnmarshalOne decodes a single JSON token into the builder: null appends a
// null; numbers and base-10 numeric strings append an int8. Any other token
// yields a *json.UnmarshalTypeError.
func (b *Int8Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()

	case string:
		f, err := strconv.ParseInt(v, 10, 1*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(int8(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(int8(f))
	case float64:
		// NOTE(review): direct conversion; out-of-range or fractional JSON
		// numbers are not range-checked — confirm intended.
		b.Append(int8(v))
	case json.Number:
		f, err := strconv.ParseInt(v.String(), 10, 1*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(int8(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(int8(f))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(int8(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal decodes the remaining elements of a JSON array into the builder.
func (b *Int8Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *Int8Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// Uint8Builder incrementally builds Uint8 arrays: a validity bitmap (from
// the embedded builder) plus a resizable value buffer.
type Uint8Builder struct {
	builder

	data    *memory.Buffer // value buffer
	rawData []uint8        // typed view over data's bytes
}

// NewUint8Builder returns a builder allocating from mem.
func NewUint8Builder(mem memory.Allocator) *Uint8Builder {
	return &Uint8Builder{builder: builder{refCount: 1, mem: mem}}
}

// Type returns the data type produced by this builder.
func (b *Uint8Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Uint8 }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Uint8Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}

// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Uint8Builder) Append(v uint8) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Uint8Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Uint8Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Uint8Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Uint8Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Uint8Builder) UnsafeAppend(v uint8) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Uint8Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Uint8Builder) AppendValues(v []uint8, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Uint8Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Uint8Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Uint8Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Uint8Traits.CastFromBytes(b.data.Bytes())
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *Uint8Builder) Reserve(n int) {
	b.builder.reserve(n, b.Resize)
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
func (b *Uint8Builder) Resize(n int) {
	// The bitmap is resized with the caller's n, but the value buffer is
	// sized with at least minBuilderCapacity elements.
	nBuilder := n
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.Uint8Traits.BytesRequired(n))
		b.rawData = arrow.Uint8Traits.CastFromBytes(b.data.Bytes())
	}
}

// Value returns the i-th appended value; it does not check validity.
func (b *Uint8Builder) Value(i int) uint8 {
	return b.rawData[i]
}

// NewArray creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder
// so it can be used to build a new array.
func (b *Uint8Builder) NewArray() arrow.Array {
	return b.NewUint8Array()
}

// NewUint8Array creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder
// so it can be used to build a new array.
func (b *Uint8Builder) NewUint8Array() (a *Uint8) {
	data := b.newData()
	a = NewUint8Data(data)
	data.Release()
	return
}

// newData snapshots the builder's buffers into a *Data (trimming the value
// buffer down to the bytes actually used) and resets the builder.
func (b *Uint8Builder) newData() (data *Data) {
	bytesRequired := arrow.Uint8Traits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	data = NewData(arrow.PrimitiveTypes.Uint8, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString appends a value parsed from s in base 10, or a null
// when s equals NullValueStr. On a parse failure a null is appended and the
// parse error is returned.
func (b *Uint8Builder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}
	v, err := strconv.ParseUint(s, 10, 1*8) // 1*8 = bit width of uint8
	if err != nil {
		b.AppendNull()
		return err
	}
	b.Append(uint8(v))
	return nil
}

// UnmarshalOne decodes a single JSON token into the builder: null appends a
// null; numbers and base-10 numeric strings append a uint8. Any other token
// yields a *json.UnmarshalTypeError.
func (b *Uint8Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()

	case string:
		f, err := strconv.ParseUint(v, 10, 1*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(uint8(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(uint8(f))
	case float64:
		// NOTE(review): direct conversion; out-of-range or fractional JSON
		// numbers are not range-checked — confirm intended.
		b.Append(uint8(v))
	case json.Number:
		f, err := strconv.ParseUint(v.String(), 10, 1*8)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(uint8(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(uint8(f))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(uint8(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}

// Unmarshal decodes the remaining elements of a JSON array into the builder.
func (b *Uint8Builder) Unmarshal(dec *json.Decoder) error {
	for dec.More() {
		if err := b.UnmarshalOne(dec); err != nil {
			return err
		}
	}
	return nil
}
+
+func (b *Uint8Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// Time32Builder incrementally builds Time32 arrays: a validity bitmap (from
// the embedded builder) plus a resizable value buffer. Unlike the plain
// numeric builders it carries the concrete *arrow.Time32Type (time unit).
type Time32Builder struct {
	builder

	dtype   *arrow.Time32Type // produced type, including the time unit
	data    *memory.Buffer    // value buffer
	rawData []arrow.Time32    // typed view over data's bytes
}

// NewTime32Builder returns a builder for arrays of dtype, allocating from mem.
func NewTime32Builder(mem memory.Allocator, dtype *arrow.Time32Type) *Time32Builder {
	return &Time32Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
}

// Type returns the data type produced by this builder.
func (b *Time32Builder) Type() arrow.DataType { return b.dtype }

// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *Time32Builder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}

// Append adds a valid value to the builder, growing the buffers if needed.
func (b *Time32Builder) Append(v arrow.Time32) {
	b.Reserve(1)
	b.UnsafeAppend(v)
}

// AppendNull adds a null entry to the builder.
func (b *Time32Builder) AppendNull() {
	b.Reserve(1)
	b.UnsafeAppendBoolToBitmap(false)
}

// AppendNulls adds n null entries to the builder.
func (b *Time32Builder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue adds a zero-valued, valid (non-null) entry.
func (b *Time32Builder) AppendEmptyValue() {
	b.Append(0)
}

// AppendEmptyValues adds n zero-valued, valid entries.
func (b *Time32Builder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// UnsafeAppend appends v without bounds checking; the caller must have
// already reserved space (e.g. via Reserve).
func (b *Time32Builder) UnsafeAppend(v arrow.Time32) {
	bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	b.rawData[b.length] = v
	b.length++
}

// UnsafeAppendBoolToBitmap records validity for the next slot without
// bounds checking; space must already be reserved. For a null entry only
// the null count and length are updated (the value slot is left as-is).
func (b *Time32Builder) UnsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *Time32Builder) AppendValues(v []arrow.Time32, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	arrow.Time32Traits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}

// init allocates the value buffer for capacity elements and initializes
// the embedded builder (null bitmap and counters).
func (b *Time32Builder) init(capacity int) {
	b.builder.init(capacity)

	b.data = memory.NewResizableBuffer(b.mem)
	bytesN := arrow.Time32Traits.BytesRequired(capacity)
	b.data.Resize(bytesN)
	b.rawData = arrow.Time32Traits.CastFromBytes(b.data.Bytes())
}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Time32Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Time32Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.Time32Traits.BytesRequired(n))
+ b.rawData = arrow.Time32Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+func (b *Time32Builder) Value(i int) arrow.Time32 {
+ return b.rawData[i]
+}
+
+// NewArray creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder
+// so it can be used to build a new array.
+func (b *Time32Builder) NewArray() arrow.Array {
+ return b.NewTime32Array()
+}
+
+// NewTime32Array creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder
+// so it can be used to build a new array.
+func (b *Time32Builder) NewTime32Array() (a *Time32) {
+ data := b.newData()
+ a = NewTime32Data(data)
+ data.Release()
+ return
+}
+
+func (b *Time32Builder) newData() (data *Data) {
+ bytesRequired := arrow.Time32Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *Time32Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ val, err := arrow.Time32FromString(s, b.dtype.Unit)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(val)
+ return nil
+}
+
// UnmarshalOne decodes a single JSON token from dec and appends it to the
// builder. JSON null appends a null; a string is parsed as a time of day in
// the builder's unit via arrow.Time32FromString; a number (json.Number or
// float64) is taken as a raw Time32 count already in the builder's unit.
// Any other token type yields a *json.UnmarshalTypeError.
func (b *Time32Builder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()
	case string:
		// Parse the textual time according to the builder's time unit.
		tm, err := arrow.Time32FromString(v, b.dtype.Unit)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value: v,
				Type: reflect.TypeOf(arrow.Time32(0)),
				Offset: dec.InputOffset(),
			}
		}

		b.Append(tm)
	case json.Number:
		n, err := v.Int64()
		if err != nil {
			return &json.UnmarshalTypeError{
				Value: v.String(),
				Type: reflect.TypeOf(arrow.Time32(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(arrow.Time32(n))
	case float64:
		// NOTE(review): fractional values are truncated by this integer
		// conversion.
		b.Append(arrow.Time32(v))

	default:
		return &json.UnmarshalTypeError{
			Value: fmt.Sprint(t),
			Type: reflect.TypeOf(arrow.Time32(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}
+
+func (b *Time32Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Time32Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+type Time64Builder struct {
+ builder
+
+ dtype *arrow.Time64Type
+ data *memory.Buffer
+ rawData []arrow.Time64
+}
+
+func NewTime64Builder(mem memory.Allocator, dtype *arrow.Time64Type) *Time64Builder {
+ return &Time64Builder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+}
+
+func (b *Time64Builder) Type() arrow.DataType { return b.dtype }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Time64Builder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+func (b *Time64Builder) Append(v arrow.Time64) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *Time64Builder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Time64Builder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *Time64Builder) AppendEmptyValue() {
+ b.Append(0)
+}
+
+func (b *Time64Builder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *Time64Builder) UnsafeAppend(v arrow.Time64) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *Time64Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Time64Builder) AppendValues(v []arrow.Time64, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ arrow.Time64Traits.Copy(b.rawData[b.length:], v)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Time64Builder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.Time64Traits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.Time64Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Time64Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Time64Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.Time64Traits.BytesRequired(n))
+ b.rawData = arrow.Time64Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+func (b *Time64Builder) Value(i int) arrow.Time64 {
+ return b.rawData[i]
+}
+
+// NewArray creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder
+// so it can be used to build a new array.
+func (b *Time64Builder) NewArray() arrow.Array {
+ return b.NewTime64Array()
+}
+
+// NewTime64Array creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder
+// so it can be used to build a new array.
+func (b *Time64Builder) NewTime64Array() (a *Time64) {
+ data := b.newData()
+ a = NewTime64Data(data)
+ data.Release()
+ return
+}
+
+func (b *Time64Builder) newData() (data *Data) {
+ bytesRequired := arrow.Time64Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *Time64Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ val, err := arrow.Time64FromString(s, b.dtype.Unit)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(val)
+ return nil
+}
+
+func (b *Time64Builder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case nil:
+ b.AppendNull()
+ case string:
+ tm, err := arrow.Time64FromString(v, b.dtype.Unit)
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf(arrow.Time64(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ b.Append(tm)
+ case json.Number:
+ n, err := v.Int64()
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf(arrow.Time64(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append(arrow.Time64(n))
+ case float64:
+ b.Append(arrow.Time64(v))
+
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf(arrow.Time64(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ return nil
+}
+
+func (b *Time64Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Time64Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+type Date32Builder struct {
+ builder
+
+ data *memory.Buffer
+ rawData []arrow.Date32
+}
+
+func NewDate32Builder(mem memory.Allocator) *Date32Builder {
+ return &Date32Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Date32Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date32 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Date32Builder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+func (b *Date32Builder) Append(v arrow.Date32) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *Date32Builder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Date32Builder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *Date32Builder) AppendEmptyValue() {
+ b.Append(0)
+}
+
+func (b *Date32Builder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *Date32Builder) UnsafeAppend(v arrow.Date32) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *Date32Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Date32Builder) AppendValues(v []arrow.Date32, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ arrow.Date32Traits.Copy(b.rawData[b.length:], v)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Date32Builder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.Date32Traits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.Date32Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Date32Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Date32Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.Date32Traits.BytesRequired(n))
+ b.rawData = arrow.Date32Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+func (b *Date32Builder) Value(i int) arrow.Date32 {
+ return b.rawData[i]
+}
+
+// NewArray creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder
+// so it can be used to build a new array.
+func (b *Date32Builder) NewArray() arrow.Array {
+ return b.NewDate32Array()
+}
+
+// NewDate32Array creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder
+// so it can be used to build a new array.
+func (b *Date32Builder) NewDate32Array() (a *Date32) {
+ data := b.newData()
+ a = NewDate32Data(data)
+ data.Release()
+ return
+}
+
+func (b *Date32Builder) newData() (data *Data) {
+ bytesRequired := arrow.Date32Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(arrow.PrimitiveTypes.Date32, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *Date32Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ tm, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(arrow.Date32FromTime(tm))
+ return nil
+}
+
+func (b *Date32Builder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case nil:
+ b.AppendNull()
+ case string:
+ tm, err := time.Parse("2006-01-02", v)
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf(arrow.Date32(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ b.Append(arrow.Date32FromTime(tm))
+ case json.Number:
+ n, err := v.Int64()
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf(arrow.Date32(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append(arrow.Date32(n))
+ case float64:
+ b.Append(arrow.Date32(v))
+
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf(arrow.Date32(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ return nil
+}
+
+func (b *Date32Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Date32Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+type Date64Builder struct {
+ builder
+
+ data *memory.Buffer
+ rawData []arrow.Date64
+}
+
+func NewDate64Builder(mem memory.Allocator) *Date64Builder {
+ return &Date64Builder{builder: builder{refCount: 1, mem: mem}}
+}
+
+func (b *Date64Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.Date64 }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *Date64Builder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+func (b *Date64Builder) Append(v arrow.Date64) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *Date64Builder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *Date64Builder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *Date64Builder) AppendEmptyValue() {
+ b.Append(0)
+}
+
+func (b *Date64Builder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *Date64Builder) UnsafeAppend(v arrow.Date64) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *Date64Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *Date64Builder) AppendValues(v []arrow.Date64, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ arrow.Date64Traits.Copy(b.rawData[b.length:], v)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *Date64Builder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.Date64Traits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.Date64Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *Date64Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *Date64Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.Date64Traits.BytesRequired(n))
+ b.rawData = arrow.Date64Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+func (b *Date64Builder) Value(i int) arrow.Date64 {
+ return b.rawData[i]
+}
+
+// NewArray creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder
+// so it can be used to build a new array.
+func (b *Date64Builder) NewArray() arrow.Array {
+ return b.NewDate64Array()
+}
+
+// NewDate64Array creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder
+// so it can be used to build a new array.
+func (b *Date64Builder) NewDate64Array() (a *Date64) {
+ data := b.newData()
+ a = NewDate64Data(data)
+ data.Release()
+ return
+}
+
+func (b *Date64Builder) newData() (data *Data) {
+ bytesRequired := arrow.Date64Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(arrow.PrimitiveTypes.Date64, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *Date64Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ tm, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(arrow.Date64FromTime(tm))
+ return nil
+}
+
+func (b *Date64Builder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case nil:
+ b.AppendNull()
+ case string:
+ tm, err := time.Parse("2006-01-02", v)
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf(arrow.Date64(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ b.Append(arrow.Date64FromTime(tm))
+ case json.Number:
+ n, err := v.Int64()
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf(arrow.Date64(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append(arrow.Date64(n))
+ case float64:
+ b.Append(arrow.Date64(v))
+
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf(arrow.Date64(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ return nil
+}
+
+func (b *Date64Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Date64Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+type DurationBuilder struct {
+ builder
+
+ dtype *arrow.DurationType
+ data *memory.Buffer
+ rawData []arrow.Duration
+}
+
+func NewDurationBuilder(mem memory.Allocator, dtype *arrow.DurationType) *DurationBuilder {
+ return &DurationBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+}
+
+func (b *DurationBuilder) Type() arrow.DataType { return b.dtype }
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *DurationBuilder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+func (b *DurationBuilder) Append(v arrow.Duration) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *DurationBuilder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *DurationBuilder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *DurationBuilder) AppendEmptyValue() {
+ b.Append(0)
+}
+
+func (b *DurationBuilder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *DurationBuilder) UnsafeAppend(v arrow.Duration) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *DurationBuilder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *DurationBuilder) AppendValues(v []arrow.Duration, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ arrow.DurationTraits.Copy(b.rawData[b.length:], v)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *DurationBuilder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.DurationTraits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.DurationTraits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *DurationBuilder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *DurationBuilder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.DurationTraits.BytesRequired(n))
+ b.rawData = arrow.DurationTraits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+func (b *DurationBuilder) Value(i int) arrow.Duration {
+ return b.rawData[i]
+}
+
+// NewArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder
+// so it can be used to build a new array.
+func (b *DurationBuilder) NewArray() arrow.Array {
+ return b.NewDurationArray()
+}
+
+// NewDurationArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder
+// so it can be used to build a new array.
+func (b *DurationBuilder) NewDurationArray() (a *Duration) {
+ data := b.newData()
+ a = NewDurationData(data)
+ data.Release()
+ return
+}
+
+func (b *DurationBuilder) newData() (data *Data) {
+ bytesRequired := arrow.DurationTraits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *DurationBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ dur, err := time.ParseDuration(s)
+ if err != nil {
+ return err
+ }
+
+ b.Append(arrow.Duration(dur / b.dtype.Unit.Multiplier()))
+ return nil
+}
+
// UnmarshalOne decodes a single JSON token from dec and appends it to the
// builder. Accepted encodings:
//   - null: appends a null
//   - number (json.Number or float64): taken as a raw count already in the
//     builder's time unit
//   - string: parsed with time.ParseDuration (e.g. "3h2m0.5s") and converted
//     to the builder's unit, with an overflow fallback described below
//
// Any other token type yields a *json.UnmarshalTypeError.
func (b *DurationBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()
	case json.Number:
		n, err := v.Int64()
		if err != nil {
			return &json.UnmarshalTypeError{
				Value: v.String(),
				Type: reflect.TypeOf(arrow.Duration(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(arrow.Duration(n))
	case float64:
		// NOTE(review): fractional values are truncated by this integer
		// conversion.
		b.Append(arrow.Duration(v))
	case string:
		// be flexible for specifying durations by accepting forms like
		// 3h2m0.5s regardless of the unit and converting it to the proper
		// precision.
		val, err := time.ParseDuration(v)
		if err != nil {
			// if we got an error, maybe it was because the attempt to create
			// a time.Duration (int64) in nanoseconds would overflow. check if
			// the string is just a large number followed by the unit suffix
			if strings.HasSuffix(v, b.dtype.Unit.String()) {
				value, err := strconv.ParseInt(v[:len(v)-len(b.dtype.Unit.String())], 10, 64)
				if err == nil {
					// Raw count in the builder's own unit; no scaling needed.
					b.Append(arrow.Duration(value))
					break
				}
			}

			return &json.UnmarshalTypeError{
				Value: v,
				Type: reflect.TypeOf(arrow.Duration(0)),
				Offset: dec.InputOffset(),
			}
		}

		// Scale the parsed nanosecond-precision duration to the builder's unit.
		switch b.dtype.Unit {
		case arrow.Nanosecond:
			b.Append(arrow.Duration(val.Nanoseconds()))
		case arrow.Microsecond:
			b.Append(arrow.Duration(val.Microseconds()))
		case arrow.Millisecond:
			b.Append(arrow.Duration(val.Milliseconds()))
		case arrow.Second:
			// Seconds() returns float64, so any sub-second remainder is
			// truncated by the integer conversion.
			b.Append(arrow.Duration(val.Seconds()))
		}

	default:
		return &json.UnmarshalTypeError{
			Value: fmt.Sprint(t),
			Type: reflect.TypeOf(arrow.Duration(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}
+
+func (b *DurationBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *DurationBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+var (
+ _ Builder = (*Int64Builder)(nil)
+ _ Builder = (*Uint64Builder)(nil)
+ _ Builder = (*Float64Builder)(nil)
+ _ Builder = (*Int32Builder)(nil)
+ _ Builder = (*Uint32Builder)(nil)
+ _ Builder = (*Float32Builder)(nil)
+ _ Builder = (*Int16Builder)(nil)
+ _ Builder = (*Uint16Builder)(nil)
+ _ Builder = (*Int8Builder)(nil)
+ _ Builder = (*Uint8Builder)(nil)
+ _ Builder = (*Time32Builder)(nil)
+ _ Builder = (*Time64Builder)(nil)
+ _ Builder = (*Date32Builder)(nil)
+ _ Builder = (*Date64Builder)(nil)
+ _ Builder = (*DurationBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl
new file mode 100644
index 000000000..cf663c031
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl
@@ -0,0 +1,447 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+{{range .In}}
+
+type {{.Name}}Builder struct {
+ builder
+
+{{if .Opt.Parametric -}}
+ dtype *arrow.{{.Name}}Type
+{{end -}}
+ data *memory.Buffer
+ rawData []{{or .QualifiedType .Type}}
+}
+
+{{if .Opt.Parametric}}
+func New{{.Name}}Builder(mem memory.Allocator, dtype *arrow.{{.Name}}Type) *{{.Name}}Builder {
+ return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}, dtype: dtype}
+}
+
+func (b *{{.Name}}Builder) Type() arrow.DataType { return b.dtype }
+
+{{else}}
+func New{{.Name}}Builder(mem memory.Allocator) *{{.Name}}Builder {
+ return &{{.Name}}Builder{builder: builder{refCount:1, mem: mem}}
+}
+
+func (b *{{.Name}}Builder) Type() arrow.DataType { return arrow.PrimitiveTypes.{{.Name}} }
+{{end}}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+func (b *{{.Name}}Builder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ if b.nullBitmap != nil {
+ b.nullBitmap.Release()
+ b.nullBitmap = nil
+ }
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+ }
+}
+
+func (b *{{.Name}}Builder) Append(v {{or .QualifiedType .Type}}) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *{{.Name}}Builder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *{{.Name}}Builder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *{{.Name}}Builder) AppendEmptyValue() {
+ b.Append(0)
+}
+
+func (b *{{.Name}}Builder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i ++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *{{.Name}}Builder) UnsafeAppend(v {{or .QualifiedType .Type}}) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *{{.Name}}Builder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
+// AppendValues will append the values in the v slice. The valid slice determines which values
+// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
+// all values in v are appended and considered valid.
+func (b *{{.Name}}Builder) AppendValues(v []{{or .QualifiedType .Type}}, valid []bool) {
+ if len(v) != len(valid) && len(valid) != 0 {
+ panic("len(v) != len(valid) && len(valid) != 0")
+ }
+
+ if len(v) == 0 {
+ return
+ }
+
+ b.Reserve(len(v))
+ arrow.{{.Name}}Traits.Copy(b.rawData[b.length:], v)
+ b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
+}
+
+func (b *{{.Name}}Builder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.{{.Name}}Traits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.{{.Name}}Traits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *{{.Name}}Builder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
+func (b *{{.Name}}Builder) Resize(n int) {
+ nBuilder := n
+ if n < minBuilderCapacity {
+ n = minBuilderCapacity
+ }
+
+ if b.capacity == 0 {
+ b.init(n)
+ } else {
+ b.builder.resize(nBuilder, b.init)
+ b.data.Resize(arrow.{{.Name}}Traits.BytesRequired(n))
+ b.rawData = arrow.{{.Name}}Traits.CastFromBytes(b.data.Bytes())
+ }
+}
+
+func (b *{{.Name}}Builder) Value(i int) {{or .QualifiedType .Type}} {
+ return b.rawData[i]
+}
+
+// NewArray creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder
+// so it can be used to build a new array.
+func (b *{{.Name}}Builder) NewArray() arrow.Array {
+ return b.New{{.Name}}Array()
+}
+
+// New{{.Name}}Array creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder
+// so it can be used to build a new array.
+func (b *{{.Name}}Builder) New{{.Name}}Array() (a *{{.Name}}) {
+ data := b.newData()
+ a = New{{.Name}}Data(data)
+ data.Release()
+ return
+}
+
+func (b *{{.Name}}Builder) newData() (data *Data) {
+ bytesRequired := arrow.{{.Name}}Traits.BytesRequired(b.length)
+ if bytesRequired > 0 && bytesRequired < b.data.Len() {
+ // trim buffers
+ b.data.Resize(bytesRequired)
+ }
+{{if .Opt.Parametric -}}
+ data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+{{else -}}
+ data = NewData(arrow.PrimitiveTypes.{{.Name}}, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
+{{end -}}
+ b.reset()
+
+ if b.data != nil {
+ b.data.Release()
+ b.data = nil
+ b.rawData = nil
+ }
+
+ return
+}
+
+func (b *{{.Name}}Builder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ {{if or (eq .Name "Date32") -}}
+ tm, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(arrow.Date32FromTime(tm))
+ {{else if or (eq .Name "Date64") -}}
+ tm, err := time.Parse("2006-01-02", s)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(arrow.Date64FromTime(tm))
+ {{else if or (eq .Name "Time32") -}}
+ val, err := arrow.Time32FromString(s, b.dtype.Unit)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(val)
+ {{else if or (eq .Name "Time64") -}}
+ val, err := arrow.Time64FromString(s, b.dtype.Unit)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append(val)
+ {{else if (eq .Name "Duration") -}}
+ dur, err := time.ParseDuration(s)
+ if err != nil {
+ return err
+ }
+
+ b.Append(arrow.Duration(dur / b.dtype.Unit.Multiplier()))
+ {{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}}
+ v, err := strconv.ParseInt(s, 10, {{.Size}} * 8)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append({{.name}}(v))
+ {{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}}
+ v, err := strconv.ParseUint(s, 10, {{.Size}} * 8)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append({{.name}}(v))
+ {{else if or (eq .Name "Float32") (eq .Name "Float64") -}}
+ v, err := strconv.ParseFloat(s, {{.Size}} * 8)
+ if err != nil {
+ b.AppendNull()
+ return err
+ }
+ b.Append({{.name}}(v))
+ {{end -}}
+ return nil
+}
+
+func (b *{{.Name}}Builder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case nil:
+ b.AppendNull()
+{{if or (eq .Name "Date32") (eq .Name "Date64") -}}
+ case string:
+ tm, err := time.Parse("2006-01-02", v)
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf({{.QualifiedType}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ b.Append({{.QualifiedType}}FromTime(tm))
+ case json.Number:
+ n, err := v.Int64()
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf({{.QualifiedType}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append({{.QualifiedType}}(n))
+ case float64:
+ b.Append({{.QualifiedType}}(v))
+{{else if or (eq .Name "Time32") (eq .Name "Time64") -}}
+ case string:
+ tm, err := {{.QualifiedType}}FromString(v, b.dtype.Unit)
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf({{.QualifiedType}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ b.Append(tm)
+ case json.Number:
+ n, err := v.Int64()
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf({{.QualifiedType}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append({{.QualifiedType}}(n))
+ case float64:
+ b.Append({{.QualifiedType}}(v))
+{{else if eq .Name "Duration" -}}
+ case json.Number:
+ n, err := v.Int64()
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf({{.QualifiedType}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append({{.QualifiedType}}(n))
+ case float64:
+ b.Append({{.QualifiedType}}(v))
+ case string:
+ // be flexible for specifying durations by accepting forms like
+ // 3h2m0.5s regardless of the unit and converting it to the proper
+ // precision.
+ val, err := time.ParseDuration(v)
+ if err != nil {
+ // if we got an error, maybe it was because the attempt to create
+ // a time.Duration (int64) in nanoseconds would overflow. check if
+ // the string is just a large number followed by the unit suffix
+ if strings.HasSuffix(v, b.dtype.Unit.String()) {
+ value, err := strconv.ParseInt(v[:len(v)-len(b.dtype.Unit.String())], 10, 64)
+ if err == nil {
+ b.Append(arrow.Duration(value))
+ break
+ }
+ }
+
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf({{.QualifiedType}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ switch b.dtype.Unit {
+ case arrow.Nanosecond:
+ b.Append({{.QualifiedType}}(val.Nanoseconds()))
+ case arrow.Microsecond:
+ b.Append({{.QualifiedType}}(val.Microseconds()))
+ case arrow.Millisecond:
+ b.Append({{.QualifiedType}}(val.Milliseconds()))
+ case arrow.Second:
+ b.Append({{.QualifiedType}}(val.Seconds()))
+ }
+{{else}}
+ case string:
+{{if or (eq .Name "Float32") (eq .Name "Float64") -}}
+ f, err := strconv.ParseFloat(v, {{.Size}}*8)
+{{else if eq (printf "%.1s" .Name) "U" -}}
+ f, err := strconv.ParseUint(v, 10, {{.Size}}*8)
+{{else -}}
+ f, err := strconv.ParseInt(v, 10, {{.Size}}*8)
+{{end -}}
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v,
+ Type: reflect.TypeOf({{.name}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append({{.name}}(f))
+ case float64:
+ b.Append({{.name}}(v))
+ case json.Number:
+{{if or (eq .Name "Float32") (eq .Name "Float64") -}}
+ f, err := strconv.ParseFloat(v.String(), {{.Size}}*8)
+{{else if eq (printf "%.1s" .Name) "U" -}}
+ f, err := strconv.ParseUint(v.String(), 10, {{.Size}}*8)
+{{else -}}
+ f, err := strconv.ParseInt(v.String(), 10, {{.Size}}*8)
+{{end -}}
+ if err != nil {
+ return &json.UnmarshalTypeError{
+ Value: v.String(),
+ Type: reflect.TypeOf({{.name}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+ b.Append({{.name}}(f))
+{{end}}
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(t),
+ Type: reflect.TypeOf({{or .QualifiedType .Type}}(0)),
+ Offset: dec.InputOffset(),
+ }
+ }
+
+ return nil
+}
+
+func (b *{{.Name}}Builder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *{{.Name}}Builder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+{{end}}
+
+var (
+{{- range .In}}
+ _ Builder = (*{{.Name}}Builder)(nil)
+{{- end}}
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl
new file mode 100644
index 000000000..bc8c99337
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl
@@ -0,0 +1,276 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array_test
+
+import (
+ "testing"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/array"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/stretchr/testify/assert"
+)
+
+{{range .In}}
+func Test{{.Name}}StringRoundTrip(t *testing.T) {
+ // 1. create array
+ mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+ defer mem.AssertSize(t, 0)
+
+{{if .Opt.Parametric -}}
+{{ if or (eq .Name "Time64") -}}
+ dt := &arrow.{{.Name}}Type{Unit: arrow.Microsecond}
+{{else -}}
+ dt := &arrow.{{.Name}}Type{Unit: arrow.Second}
+{{end -}}
+ b := array.New{{.Name}}Builder(mem, dt)
+{{else -}}
+ b := array.New{{.Name}}Builder(mem)
+{{end -}}
+ defer b.Release()
+
+ b.Append(1)
+ b.Append(2)
+ b.Append(3)
+ b.AppendNull()
+ b.Append(5)
+ b.Append(6)
+ b.AppendNull()
+ b.Append(8)
+ b.Append(9)
+ b.Append(10)
+
+ arr := b.NewArray().(*array.{{.Name}})
+ defer arr.Release()
+
+ // 2. create array via AppendValueFromString
+{{if .Opt.Parametric -}}
+ b1 := array.New{{.Name}}Builder(mem, dt)
+{{else -}}
+ b1 := array.New{{.Name}}Builder(mem)
+{{end -}}
+ defer b1.Release()
+
+ for i := 0; i < arr.Len(); i++ {
+ assert.NoError(t, b1.AppendValueFromString(arr.ValueStr(i)))
+ }
+
+ arr1 := b1.NewArray().(*array.{{.Name}})
+ defer arr1.Release()
+
+{{ if or (eq .Name "Date64") -}}
+ assert.Exactly(t, arr.Len(), arr1.Len())
+ for i := 0; i < arr.Len(); i++ {
+ assert.Exactly(t, arr.IsValid(i), arr1.IsValid(i))
+ assert.Exactly(t, arr.ValueStr(i), arr1.ValueStr(i))
+ if arr.IsValid(i) {
+ assert.Exactly(t, arr.Value(i).ToTime(), arr1.Value(i).ToTime())
+ }
+ }
+{{else -}}
+ assert.True(t, array.Equal(arr, arr1))
+{{end -}}
+}
+
+func TestNew{{.Name}}Builder(t *testing.T) {
+ mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+ defer mem.AssertSize(t, 0)
+
+{{if .Opt.Parametric -}}
+ dtype := &arrow.{{.Name}}Type{Unit: arrow.Second}
+ ab := array.New{{.Name}}Builder(mem, dtype)
+{{else}}
+ ab := array.New{{.Name}}Builder(mem)
+{{end -}}
+ defer ab.Release()
+
+ ab.Retain()
+ ab.Release()
+
+ ab.Append(1)
+ ab.Append(2)
+ ab.Append(3)
+ ab.AppendNull()
+ ab.Append(5)
+ ab.Append(6)
+ ab.AppendNull()
+ ab.Append(8)
+ ab.Append(9)
+ ab.Append(10)
+
+ // check state of builder before New{{.Name}}Array
+ assert.Equal(t, 10, ab.Len(), "unexpected Len()")
+ assert.Equal(t, 2, ab.NullN(), "unexpected NullN()")
+
+ a := ab.New{{.Name}}Array()
+
+ // check state of builder after New{{.Name}}Array
+ assert.Zero(t, ab.Len(), "unexpected ArrayBuilder.Len(), New{{.Name}}Array did not reset state")
+ assert.Zero(t, ab.Cap(), "unexpected ArrayBuilder.Cap(), New{{.Name}}Array did not reset state")
+ assert.Zero(t, ab.NullN(), "unexpected ArrayBuilder.NullN(), New{{.Name}}Array did not reset state")
+
+ // check state of array
+ assert.Equal(t, 2, a.NullN(), "unexpected null count")
+ assert.Equal(t, []{{or .QualifiedType .Type}}{1, 2, 3, 0, 5, 6, 0, 8, 9, 10}, a.{{.Name}}Values(), "unexpected {{.Name}}Values")
+ assert.Equal(t, []byte{0xb7}, a.NullBitmapBytes()[:1]) // 4 bytes due to minBuilderCapacity
+ assert.Len(t, a.{{.Name}}Values(), 10, "unexpected length of {{.Name}}Values")
+
+ a.Release()
+
+ ab.Append(7)
+ ab.Append(8)
+
+ a = ab.New{{.Name}}Array()
+
+ assert.Equal(t, 0, a.NullN())
+ assert.Equal(t, []{{or .QualifiedType .Type}}{7, 8}, a.{{.Name}}Values())
+ assert.Len(t, a.{{.Name}}Values(), 2)
+
+ a.Release()
+
+ var (
+ want = []{{or .QualifiedType .Type}}{1, 2, 3, 4}
+ valids = []bool{true, true, false, true}
+ )
+
+ ab.AppendValues(want, valids)
+ a = ab.New{{.Name}}Array()
+
+ sub := array.MakeFromData(a.Data())
+ defer sub.Release()
+
+ if got, want := sub.DataType().ID(), a.DataType().ID(); got != want {
+ t.Fatalf("invalid type: got=%q, want=%q", got, want)
+ }
+
+ if _, ok := sub.(*array.{{.Name}}); !ok {
+ t.Fatalf("could not type-assert to array.{{.Name}}")
+ }
+
+ if got, want := a.String(), `[1 2 (null) 4]`; got != want {
+ t.Fatalf("got=%q, want=%q", got, want)
+ }
+
+ slice := array.NewSliceData(a.Data(), 2, 4)
+ defer slice.Release()
+
+ sub1 := array.MakeFromData(slice)
+ defer sub1.Release()
+
+ v, ok := sub1.(*array.{{.Name}})
+ if !ok {
+ t.Fatalf("could not type-assert to array.{{.Name}}")
+ }
+
+ if got, want := v.String(), `[(null) 4]`; got != want {
+ t.Fatalf("got=%q, want=%q", got, want)
+ }
+
+ a.Release()
+}
+
+func Test{{.Name}}Builder_AppendValues(t *testing.T) {
+ mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+ defer mem.AssertSize(t, 0)
+
+{{if .Opt.Parametric -}}
+ dtype := &arrow.{{.Name}}Type{Unit: arrow.Second}
+ ab := array.New{{.Name}}Builder(mem, dtype)
+{{else}}
+ ab := array.New{{.Name}}Builder(mem)
+{{end -}}
+ defer ab.Release()
+
+ exp := []{{or .QualifiedType .Type}}{0, 1, 2, 3}
+ ab.AppendValues(exp, nil)
+ a := ab.New{{.Name}}Array()
+ assert.Equal(t, exp, a.{{.Name}}Values())
+
+ a.Release()
+}
+
+func Test{{.Name}}Builder_Empty(t *testing.T) {
+ mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+ defer mem.AssertSize(t, 0)
+
+{{if .Opt.Parametric -}}
+ dtype := &arrow.{{.Name}}Type{Unit: arrow.Second}
+ ab := array.New{{.Name}}Builder(mem, dtype)
+{{else}}
+ ab := array.New{{.Name}}Builder(mem)
+{{end -}}
+ defer ab.Release()
+
+ exp := []{{or .QualifiedType .Type}}{0, 1, 2, 3}
+
+ ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil)
+ a := ab.New{{.Name}}Array()
+ assert.Zero(t, a.Len())
+ a.Release()
+
+ ab.AppendValues(nil, nil)
+ a = ab.New{{.Name}}Array()
+ assert.Zero(t, a.Len())
+ a.Release()
+
+ ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil)
+ ab.AppendValues(exp, nil)
+ a = ab.New{{.Name}}Array()
+ assert.Equal(t, exp, a.{{.Name}}Values())
+ a.Release()
+
+ ab.AppendValues(exp, nil)
+ ab.AppendValues([]{{or .QualifiedType .Type}}{}, nil)
+ a = ab.New{{.Name}}Array()
+ assert.Equal(t, exp, a.{{.Name}}Values())
+ a.Release()
+}
+
+func Test{{.Name}}Builder_Resize(t *testing.T) {
+ mem := memory.NewCheckedAllocator(memory.NewGoAllocator())
+ defer mem.AssertSize(t, 0)
+
+{{if .Opt.Parametric -}}
+ dtype := &arrow.{{.Name}}Type{Unit: arrow.Second}
+ ab := array.New{{.Name}}Builder(mem, dtype)
+{{else}}
+ ab := array.New{{.Name}}Builder(mem)
+{{end -}}
+ defer ab.Release()
+
+ assert.Equal(t, 0, ab.Cap())
+ assert.Equal(t, 0, ab.Len())
+
+ ab.Reserve(63)
+ assert.Equal(t, 64, ab.Cap())
+ assert.Equal(t, 0, ab.Len())
+
+ for i := 0; i < 63; i++ {
+ ab.Append(0)
+ }
+ assert.Equal(t, 64, ab.Cap())
+ assert.Equal(t, 63, ab.Len())
+
+ ab.Resize(5)
+ assert.Equal(t, 5, ab.Len())
+
+ ab.Resize(32)
+ assert.Equal(t, 5, ab.Len())
+}
+{{end}}
+
+
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go
new file mode 100644
index 000000000..0b0fe4c38
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go
@@ -0,0 +1,411 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// RecordReader reads a stream of records.
+type RecordReader interface {
+ Retain()
+ Release()
+
+ Schema() *arrow.Schema
+
+ Next() bool
+ Record() arrow.Record
+ Err() error
+}
+
+// simpleRecords is a simple iterator over a collection of records.
+type simpleRecords struct {
+ refCount int64
+
+ schema *arrow.Schema
+ recs []arrow.Record
+ cur arrow.Record
+}
+
+// NewRecordReader returns a simple iterator over the given slice of records.
+func NewRecordReader(schema *arrow.Schema, recs []arrow.Record) (*simpleRecords, error) {
+ rs := &simpleRecords{
+ refCount: 1,
+ schema: schema,
+ recs: recs,
+ cur: nil,
+ }
+
+ for _, rec := range rs.recs {
+ rec.Retain()
+ }
+
+ for _, rec := range recs {
+ if !rec.Schema().Equal(rs.schema) {
+ rs.Release()
+ return nil, fmt.Errorf("arrow/array: mismatch schema")
+ }
+ }
+
+ return rs, nil
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (rs *simpleRecords) Retain() {
+ atomic.AddInt64(&rs.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (rs *simpleRecords) Release() {
+ debug.Assert(atomic.LoadInt64(&rs.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&rs.refCount, -1) == 0 {
+ if rs.cur != nil {
+ rs.cur.Release()
+ }
+ for _, rec := range rs.recs {
+ rec.Release()
+ }
+ rs.recs = nil
+ }
+}
+
+func (rs *simpleRecords) Schema() *arrow.Schema { return rs.schema }
+func (rs *simpleRecords) Record() arrow.Record { return rs.cur }
+func (rs *simpleRecords) Next() bool {
+ if len(rs.recs) == 0 {
+ return false
+ }
+ if rs.cur != nil {
+ rs.cur.Release()
+ }
+ rs.cur = rs.recs[0]
+ rs.recs = rs.recs[1:]
+ return true
+}
+func (rs *simpleRecords) Err() error { return nil }
+
+// simpleRecord is a basic, non-lazy in-memory record batch.
+type simpleRecord struct {
+ refCount int64
+
+ schema *arrow.Schema
+
+ rows int64
+ arrs []arrow.Array
+}
+
+// NewRecord returns a basic, non-lazy in-memory record batch.
+//
+// NewRecord panics if the columns and schema are inconsistent.
+// NewRecord panics if rows is larger than the height of the columns.
+func NewRecord(schema *arrow.Schema, cols []arrow.Array, nrows int64) *simpleRecord {
+ rec := &simpleRecord{
+ refCount: 1,
+ schema: schema,
+ rows: nrows,
+ arrs: make([]arrow.Array, len(cols)),
+ }
+ copy(rec.arrs, cols)
+ for _, arr := range rec.arrs {
+ arr.Retain()
+ }
+
+ if rec.rows < 0 {
+ switch len(rec.arrs) {
+ case 0:
+ rec.rows = 0
+ default:
+ rec.rows = int64(rec.arrs[0].Len())
+ }
+ }
+
+ err := rec.validate()
+ if err != nil {
+ rec.Release()
+ panic(err)
+ }
+
+ return rec
+}
+
+func (rec *simpleRecord) SetColumn(i int, arr arrow.Array) (arrow.Record, error) {
+ if i < 0 || i >= len(rec.arrs) {
+ return nil, fmt.Errorf("arrow/array: column index out of range [0, %d): got=%d", len(rec.arrs), i)
+ }
+
+ if arr.Len() != int(rec.rows) {
+ return nil, fmt.Errorf("arrow/array: mismatch number of rows in column %q: got=%d, want=%d",
+ rec.schema.Field(i).Name,
+ arr.Len(), rec.rows,
+ )
+ }
+
+ f := rec.schema.Field(i)
+ if !arrow.TypeEqual(f.Type, arr.DataType()) {
+ return nil, fmt.Errorf("arrow/array: column %q type mismatch: got=%v, want=%v",
+ f.Name,
+ arr.DataType(), f.Type,
+ )
+ }
+ arrs := make([]arrow.Array, len(rec.arrs))
+ copy(arrs, rec.arrs)
+ arrs[i] = arr
+
+ return NewRecord(rec.schema, arrs, rec.rows), nil
+}
+
+func (rec *simpleRecord) validate() error {
+ if rec.rows == 0 && len(rec.arrs) == 0 {
+ return nil
+ }
+
+ if len(rec.arrs) != len(rec.schema.Fields()) {
+ return fmt.Errorf("arrow/array: number of columns/fields mismatch")
+ }
+
+ for i, arr := range rec.arrs {
+ f := rec.schema.Field(i)
+ if int64(arr.Len()) < rec.rows {
+ return fmt.Errorf("arrow/array: mismatch number of rows in column %q: got=%d, want=%d",
+ f.Name,
+ arr.Len(), rec.rows,
+ )
+ }
+ if !arrow.TypeEqual(f.Type, arr.DataType()) {
+ return fmt.Errorf("arrow/array: column %q type mismatch: got=%v, want=%v",
+ f.Name,
+ arr.DataType(), f.Type,
+ )
+ }
+ }
+ return nil
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (rec *simpleRecord) Retain() {
+ atomic.AddInt64(&rec.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (rec *simpleRecord) Release() {
+ debug.Assert(atomic.LoadInt64(&rec.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&rec.refCount, -1) == 0 {
+ for _, arr := range rec.arrs {
+ arr.Release()
+ }
+ rec.arrs = nil
+ }
+}
+
+func (rec *simpleRecord) Schema() *arrow.Schema { return rec.schema }
+func (rec *simpleRecord) NumRows() int64 { return rec.rows }
+func (rec *simpleRecord) NumCols() int64 { return int64(len(rec.arrs)) }
+func (rec *simpleRecord) Columns() []arrow.Array { return rec.arrs }
+func (rec *simpleRecord) Column(i int) arrow.Array { return rec.arrs[i] }
+func (rec *simpleRecord) ColumnName(i int) string { return rec.schema.Field(i).Name }
+
+// NewSlice constructs a zero-copy slice of the record with the indicated
+// indices i and j, corresponding to array[i:j].
+// The returned record must be Release()'d after use.
+//
+// NewSlice panics if the slice is outside the valid range of the record array.
+// NewSlice panics if j < i.
+func (rec *simpleRecord) NewSlice(i, j int64) arrow.Record {
+ arrs := make([]arrow.Array, len(rec.arrs))
+ for ii, arr := range rec.arrs {
+ arrs[ii] = NewSlice(arr, i, j)
+ }
+ defer func() {
+ for _, arr := range arrs {
+ arr.Release()
+ }
+ }()
+ return NewRecord(rec.schema, arrs, j-i)
+}
+
+func (rec *simpleRecord) String() string {
+ o := new(strings.Builder)
+ fmt.Fprintf(o, "record:\n %v\n", rec.schema)
+ fmt.Fprintf(o, " rows: %d\n", rec.rows)
+ for i, col := range rec.arrs {
+ fmt.Fprintf(o, " col[%d][%s]: %v\n", i, rec.schema.Field(i).Name, col)
+ }
+
+ return o.String()
+}
+
+func (rec *simpleRecord) MarshalJSON() ([]byte, error) {
+ arr := RecordToStructArray(rec)
+ defer arr.Release()
+ return arr.MarshalJSON()
+}
+
+// RecordBuilder eases the process of building a Record, iteratively, from
+// a known Schema.
+type RecordBuilder struct {
+ refCount int64
+ mem memory.Allocator
+ schema *arrow.Schema
+ fields []Builder
+}
+
+// NewRecordBuilder returns a builder, using the provided memory allocator and a schema.
+func NewRecordBuilder(mem memory.Allocator, schema *arrow.Schema) *RecordBuilder {
+ b := &RecordBuilder{
+ refCount: 1,
+ mem: mem,
+ schema: schema,
+ fields: make([]Builder, len(schema.Fields())),
+ }
+
+ for i, f := range schema.Fields() {
+ b.fields[i] = NewBuilder(b.mem, f.Type)
+ }
+
+ return b
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (b *RecordBuilder) Retain() {
+ atomic.AddInt64(&b.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+func (b *RecordBuilder) Release() {
+ debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&b.refCount, -1) == 0 {
+ for _, f := range b.fields {
+ f.Release()
+ }
+ b.fields = nil
+ }
+}
+
+func (b *RecordBuilder) Schema() *arrow.Schema { return b.schema }
+func (b *RecordBuilder) Fields() []Builder { return b.fields }
+func (b *RecordBuilder) Field(i int) Builder { return b.fields[i] }
+
+func (b *RecordBuilder) Reserve(size int) {
+ for _, f := range b.fields {
+ f.Reserve(size)
+ }
+}
+
+// NewRecord creates a new record from the memory buffers and resets the
+// RecordBuilder so it can be used to build a new record.
+//
+// The returned Record must be Release()'d after use.
+//
+// NewRecord panics if the fields' builder do not have the same length.
+func (b *RecordBuilder) NewRecord() arrow.Record {
+ cols := make([]arrow.Array, len(b.fields))
+ rows := int64(0)
+
+ defer func(cols []arrow.Array) {
+ for _, col := range cols {
+ if col == nil {
+ continue
+ }
+ col.Release()
+ }
+ }(cols)
+
+ for i, f := range b.fields {
+ cols[i] = f.NewArray()
+ irow := int64(cols[i].Len())
+ if i > 0 && irow != rows {
+ panic(fmt.Errorf("arrow/array: field %d has %d rows. want=%d", i, irow, rows))
+ }
+ rows = irow
+ }
+
+ return NewRecord(b.schema, cols, rows)
+}
+
+// UnmarshalJSON for record builder will read in a single object and add the values
+// to each field in the recordbuilder, missing fields will get a null and unexpected
+// keys will be ignored. If reading in an array of records as a single batch, then use
+// a structbuilder and use RecordFromStruct.
+func (b *RecordBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ // should start with a '{'
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '{' {
+ return fmt.Errorf("record should start with '{', not %s", t)
+ }
+
+ keylist := make(map[string]bool)
+ for dec.More() {
+ keyTok, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ key := keyTok.(string)
+ if keylist[key] {
+ return fmt.Errorf("key %s shows up twice in row to be decoded", key)
+ }
+ keylist[key] = true
+
+ indices := b.schema.FieldIndices(key)
+ if len(indices) == 0 {
+ var extra interface{}
+ if err := dec.Decode(&extra); err != nil {
+ return err
+ }
+ continue
+ }
+
+ if err := b.fields[indices[0]].UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+
+ for i, f := range b.schema.Fields() {
+ if !keylist[f.Name] {
+ b.fields[i].AppendNull()
+ }
+ }
+ return nil
+}
+
+var (
+ _ arrow.Record = (*simpleRecord)(nil)
+ _ RecordReader = (*simpleRecords)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go
new file mode 100644
index 000000000..86e27c970
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go
@@ -0,0 +1,521 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// String represents an immutable sequence of variable-length UTF-8 strings.
type String struct {
	array
	offsets []int32 // element i spans values[offsets[i]:offsets[i+1]] (after adding data.offset)
	values  string  // zero-copy view over the value buffer; never mutated
}

// NewStringData constructs a new String array from data.
func NewStringData(data arrow.ArrayData) *String {
	a := &String{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}

// Reset resets the String with a different set of Data.
func (a *String) Reset(data arrow.ArrayData) {
	a.setData(data.(*Data))
}

// Value returns the slice at index i. This value should not be mutated.
func (a *String) Value(i int) string {
	// index into the logical view: account for the array's slice offset.
	i = i + a.array.data.offset
	return a.values[a.offsets[i]:a.offsets[i+1]]
}
+
// ValueStr returns the value at index i as a string, or NullValueStr if null.
func (a *String) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}
	return a.Value(i)
}

// ValueOffset returns the offset of the value at index i.
// i may equal Len() to obtain the end offset of the last element.
func (a *String) ValueOffset(i int) int {
	if i < 0 || i > a.array.data.length {
		panic("arrow/array: index out of range")
	}
	return int(a.offsets[i+a.array.data.offset])
}

// ValueOffset64 is like ValueOffset but returns int64.
func (a *String) ValueOffset64(i int) int64 {
	return int64(a.ValueOffset(i))
}

// ValueLen returns the byte length of the value at index i.
func (a *String) ValueLen(i int) int {
	if i < 0 || i >= a.array.data.length {
		panic("arrow/array: index out of range")
	}
	beg := a.array.data.offset + i
	return int(a.offsets[beg+1] - a.offsets[beg])
}

// ValueOffsets returns the logical offsets slice (length Len()+1) for this array view.
func (a *String) ValueOffsets() []int32 {
	beg := a.array.data.offset
	end := beg + a.array.data.length + 1
	return a.offsets[beg:end]
}

// ValueBytes returns the raw value bytes covered by this array view,
// or nil if there is no value buffer.
func (a *String) ValueBytes() []byte {
	beg := a.array.data.offset
	end := beg + a.array.data.length
	if a.array.data.buffers[2] != nil {
		return a.array.data.buffers[2].Bytes()[a.offsets[beg]:a.offsets[end]]
	}
	return nil
}
+
+func (a *String) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%q", a.Value(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// setData wires the buffers of data into the array and validates the
// offsets against the value buffer.
func (a *String) setData(data *Data) {
	// A string array always has 3 buffers: validity, offsets, values.
	if len(data.buffers) != 3 {
		panic("arrow/array: len(data.buffers) != 3")
	}

	a.array.setData(data)

	if vdata := data.buffers[2]; vdata != nil {
		b := vdata.Bytes()
		// Zero-copy view of the value buffer as a string; safe because both
		// the buffer contents and Arrow arrays are immutable.
		a.values = *(*string)(unsafe.Pointer(&b))
	}

	if offsets := data.buffers[1]; offsets != nil {
		a.offsets = arrow.Int32Traits.CastFromBytes(offsets.Bytes())
	}

	// Empty arrays need no offset validation.
	if a.array.data.length < 1 {
		return
	}

	// The offsets must cover [offset, offset+length] and stay within the
	// bounds of the value buffer.
	expNumOffsets := a.array.data.offset + a.array.data.length + 1
	if len(a.offsets) < expNumOffsets {
		panic(fmt.Errorf("arrow/array: string offset buffer must have at least %d values", expNumOffsets))
	}

	if int(a.offsets[expNumOffsets-1]) > len(a.values) {
		panic("arrow/array: string offsets out of bounds of data buffer")
	}
}
+
+func (a *String) GetOneForMarshal(i int) interface{} {
+ if a.IsValid(i) {
+ return a.Value(i)
+ }
+ return nil
+}
+
+func (a *String) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := 0; i < a.Len(); i++ {
+ if a.IsValid(i) {
+ vals[i] = a.Value(i)
+ } else {
+ vals[i] = nil
+ }
+ }
+ return json.Marshal(vals)
+}
+
// arrayEqualString reports whether the non-null values of left and right are
// equal. The caller (Equal) is expected to have already verified matching
// lengths and validity bitmaps, so only left's nulls are consulted here.
func arrayEqualString(left, right *String) bool {
	for i := 0; i < left.Len(); i++ {
		if left.IsNull(i) {
			continue
		}
		if left.Value(i) != right.Value(i) {
			return false
		}
	}
	return true
}
+
// LargeString represents an immutable sequence of variable-length UTF-8
// strings with 64-bit offsets.
type LargeString struct {
	array
	offsets []int64 // element i spans values[offsets[i]:offsets[i+1]] (after adding data.offset)
	values  string  // zero-copy view over the value buffer; never mutated
}

// NewLargeStringData constructs a new LargeString array from data.
func NewLargeStringData(data arrow.ArrayData) *LargeString {
	a := &LargeString{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}

// Reset resets the LargeString with a different set of Data.
func (a *LargeString) Reset(data arrow.ArrayData) {
	a.setData(data.(*Data))
}

// Value returns the slice at index i. This value should not be mutated.
func (a *LargeString) Value(i int) string {
	// index into the logical view: account for the array's slice offset.
	i = i + a.array.data.offset
	return a.values[a.offsets[i]:a.offsets[i+1]]
}

// ValueStr returns the value at index i as a string, or NullValueStr if null.
func (a *LargeString) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}
	return a.Value(i)
}
+
// ValueOffset returns the offset of the value at index i.
// i may equal Len() to obtain the end offset of the last element.
func (a *LargeString) ValueOffset(i int) int64 {
	if i < 0 || i > a.array.data.length {
		panic("arrow/array: index out of range")
	}
	return a.offsets[i+a.array.data.offset]
}

// ValueOffset64 is the same as ValueOffset; offsets are already 64-bit.
func (a *LargeString) ValueOffset64(i int) int64 {
	return a.ValueOffset(i)
}

// ValueOffsets returns the logical offsets slice (length Len()+1) for this array view.
func (a *LargeString) ValueOffsets() []int64 {
	beg := a.array.data.offset
	end := beg + a.array.data.length + 1
	return a.offsets[beg:end]
}

// ValueBytes returns the raw value bytes covered by this array view,
// or nil if there is no value buffer.
func (a *LargeString) ValueBytes() []byte {
	beg := a.array.data.offset
	end := beg + a.array.data.length
	if a.array.data.buffers[2] != nil {
		return a.array.data.buffers[2].Bytes()[a.offsets[beg]:a.offsets[end]]
	}
	return nil
}
+
+func (a *LargeString) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ o.WriteString(" ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%q", a.Value(i))
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// setData wires the buffers of data into the array and validates the
// offsets against the value buffer.
func (a *LargeString) setData(data *Data) {
	// A string array always has 3 buffers: validity, offsets, values.
	if len(data.buffers) != 3 {
		panic("arrow/array: len(data.buffers) != 3")
	}

	a.array.setData(data)

	if vdata := data.buffers[2]; vdata != nil {
		b := vdata.Bytes()
		// Zero-copy view of the value buffer as a string; safe because both
		// the buffer contents and Arrow arrays are immutable.
		a.values = *(*string)(unsafe.Pointer(&b))
	}

	if offsets := data.buffers[1]; offsets != nil {
		a.offsets = arrow.Int64Traits.CastFromBytes(offsets.Bytes())
	}

	// Empty arrays need no offset validation.
	if a.array.data.length < 1 {
		return
	}

	expNumOffsets := a.array.data.offset + a.array.data.length + 1
	if len(a.offsets) < expNumOffsets {
		panic(fmt.Errorf("arrow/array: string offset buffer must have at least %d values", expNumOffsets))
	}

	// NOTE(review): this int conversion could truncate a very large int64
	// offset on 32-bit platforms — confirm offsets fit in int here.
	if int(a.offsets[expNumOffsets-1]) > len(a.values) {
		panic("arrow/array: string offsets out of bounds of data buffer")
	}
}
+
// GetOneForMarshal returns the value at index i suitable for JSON
// marshalling: the string itself, or nil when the slot is null.
func (a *LargeString) GetOneForMarshal(i int) interface{} {
	if a.IsValid(i) {
		return a.Value(i)
	}
	return nil
}

// MarshalJSON implements json.Marshaler, encoding the array as a JSON array
// with null entries for null slots.
func (a *LargeString) MarshalJSON() ([]byte, error) {
	vals := make([]interface{}, a.Len())
	for i := 0; i < a.Len(); i++ {
		vals[i] = a.GetOneForMarshal(i)
	}
	return json.Marshal(vals)
}
+
+func arrayEqualLargeString(left, right *LargeString) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
// A StringBuilder is used to build a String array using the Append methods.
// It delegates storage to an embedded BinaryBuilder with a String datatype.
type StringBuilder struct {
	*BinaryBuilder
}

// NewStringBuilder creates a new StringBuilder.
func NewStringBuilder(mem memory.Allocator) *StringBuilder {
	b := &StringBuilder{
		BinaryBuilder: NewBinaryBuilder(mem, arrow.BinaryTypes.String),
	}
	return b
}

// Type returns the datatype produced by this builder (arrow.BinaryTypes.String).
func (b *StringBuilder) Type() arrow.DataType {
	return arrow.BinaryTypes.String
}

// Append appends a string to the builder.
func (b *StringBuilder) Append(v string) {
	b.BinaryBuilder.Append([]byte(v))
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *StringBuilder) AppendValues(v []string, valid []bool) {
	b.BinaryBuilder.AppendStringValues(v, valid)
}

// Value returns the string at index i.
func (b *StringBuilder) Value(i int) string {
	return string(b.BinaryBuilder.Value(i))
}
+
+// func (b *StringBuilder) UnsafeAppend(v string) {
+// b.BinaryBuilder.UnsafeAppend([]byte(v))
+// }
+
// NewArray creates a String array from the memory buffers used by the builder and resets the StringBuilder
// so it can be used to build a new array.
func (b *StringBuilder) NewArray() arrow.Array {
	return b.NewStringArray()
}

// NewStringArray creates a String array from the memory buffers used by the builder and resets the StringBuilder
// so it can be used to build a new array.
func (b *StringBuilder) NewStringArray() (a *String) {
	data := b.newData()
	a = NewStringData(data)
	// the array retains data; release the builder's reference.
	data.Release()
	return
}
+
+func (b *StringBuilder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch v := t.(type) {
+ case nil:
+ b.AppendNull()
+ case string:
+ b.Append(v)
+ default:
+ return &json.UnmarshalTypeError{
+ Value: fmt.Sprint(v),
+ Type: reflect.TypeOf(string("")),
+ Offset: dec.InputOffset(),
+ }
+ }
+ return nil
+}
+
+func (b *StringBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *StringBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("string builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// A LargeStringBuilder is used to build a LargeString array using the Append methods.
// LargeString is for when you need the offset buffer to be 64-bit integers
// instead of 32-bit integers.
type LargeStringBuilder struct {
	*BinaryBuilder
}

// NewLargeStringBuilder creates a new LargeStringBuilder.
func NewLargeStringBuilder(mem memory.Allocator) *LargeStringBuilder {
	b := &LargeStringBuilder{
		BinaryBuilder: NewBinaryBuilder(mem, arrow.BinaryTypes.LargeString),
	}
	return b
}

// Type returns the datatype produced by this builder (arrow.BinaryTypes.LargeString).
func (b *LargeStringBuilder) Type() arrow.DataType { return arrow.BinaryTypes.LargeString }

// Append appends a string to the builder.
func (b *LargeStringBuilder) Append(v string) {
	b.BinaryBuilder.Append([]byte(v))
}

// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *LargeStringBuilder) AppendValues(v []string, valid []bool) {
	b.BinaryBuilder.AppendStringValues(v, valid)
}

// Value returns the string at index i.
func (b *LargeStringBuilder) Value(i int) string {
	return string(b.BinaryBuilder.Value(i))
}
+
+// func (b *LargeStringBuilder) UnsafeAppend(v string) {
+// b.BinaryBuilder.UnsafeAppend([]byte(v))
+// }
+
// NewArray creates a LargeString array from the memory buffers used by the builder and resets
// the LargeStringBuilder so it can be used to build a new array.
func (b *LargeStringBuilder) NewArray() arrow.Array {
	return b.NewLargeStringArray()
}

// NewLargeStringArray creates a LargeString array from the memory buffers used by the builder
// and resets the LargeStringBuilder so it can be used to build a new array.
func (b *LargeStringBuilder) NewLargeStringArray() (a *LargeString) {
	data := b.newData()
	a = NewLargeStringData(data)
	// the array retains data; release the builder's reference.
	data.Release()
	return
}
+
// UnmarshalOne decodes a single JSON value from dec: a string is appended
// as-is, null appends a null, anything else is a type error.
func (b *LargeStringBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()
	case string:
		b.Append(v)
	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(v),
			Type:   reflect.TypeOf(string("")),
			Offset: dec.InputOffset(),
		}
	}
	return nil
}
+
+func (b *LargeStringBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *LargeStringBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("string builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
// StringLikeBuilder is satisfied by builders that accept string values
// (StringBuilder, LargeStringBuilder), abstracting over offset width.
type StringLikeBuilder interface {
	Builder
	Append(string)
	UnsafeAppend([]byte)
	ReserveData(int)
}

var (
	// compile-time interface conformance checks.
	_ arrow.Array       = (*String)(nil)
	_ arrow.Array       = (*LargeString)(nil)
	_ Builder           = (*StringBuilder)(nil)
	_ Builder           = (*LargeStringBuilder)(nil)
	_ StringLikeBuilder = (*StringBuilder)(nil)
	_ StringLikeBuilder = (*LargeStringBuilder)(nil)
)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go
new file mode 100644
index 000000000..248a25bf6
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go
@@ -0,0 +1,491 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
// Struct represents an ordered sequence of relative types.
type Struct struct {
	array
	fields []arrow.Array // one child array per struct field, sliced to this view
}

// NewStructArray constructs a new Struct Array out of the columns passed
// in and the field names. The length of all cols must be the same and
// there should be the same number of columns as names.
func NewStructArray(cols []arrow.Array, names []string) (*Struct, error) {
	return NewStructArrayWithNulls(cols, names, nil, 0, 0)
}

// NewStructArrayWithNulls is like NewStructArray as a convenience function,
// but also takes in a null bitmap, the number of nulls, and an optional offset
// to use for creating the Struct Array.
func NewStructArrayWithNulls(cols []arrow.Array, names []string, nullBitmap *memory.Buffer, nullCount int, offset int) (*Struct, error) {
	if len(cols) != len(names) {
		return nil, fmt.Errorf("%w: mismatching number of fields and child arrays", arrow.ErrInvalid)
	}
	if len(cols) == 0 {
		return nil, fmt.Errorf("%w: can't infer struct array length with 0 child arrays", arrow.ErrInvalid)
	}
	length := cols[0].Len()
	children := make([]arrow.ArrayData, len(cols))
	fields := make([]arrow.Field, len(cols))
	for i, c := range cols {
		if length != c.Len() {
			return nil, fmt.Errorf("%w: mismatching child array lengths", arrow.ErrInvalid)
		}
		children[i] = c.Data()
		fields[i].Name = names[i]
		fields[i].Type = c.DataType()
		// all fields are marked nullable since no per-field bitmaps are given.
		fields[i].Nullable = true
	}
	data := NewData(arrow.StructOf(fields...), length, []*memory.Buffer{nullBitmap}, children, nullCount, offset)
	// NewStructData retains data; drop our reference before returning.
	defer data.Release()
	return NewStructData(data), nil
}

// NewStructData returns a new Struct array value from data.
func NewStructData(data arrow.ArrayData) *Struct {
	a := &Struct{}
	a.refCount = 1
	a.setData(data.(*Data))
	return a
}

// NumField returns the number of child fields in the struct.
func (a *Struct) NumField() int { return len(a.fields) }

// Field returns the i-th child array.
func (a *Struct) Field(i int) arrow.Array { return a.fields[i] }
+
+// ValueStr returns the string representation (as json) of the value at index i.
+func (a *Struct) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ data, err := json.Marshal(a.GetOneForMarshal(i))
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
// String returns a human-readable representation of the struct array,
// one child array per field, e.g. {[1 2] ["a" "b"]}.
func (a *Struct) String() string {
	o := new(strings.Builder)
	o.WriteString("{")

	structBitmap := a.NullBitmapBytes()
	for i, v := range a.fields {
		if i > 0 {
			o.WriteString(" ")
		}
		if arrow.IsUnion(v.DataType().ID()) {
			// unions carry no validity bitmap of their own; print directly.
			fmt.Fprintf(o, "%v", v)
			continue
		} else if !bytes.Equal(structBitmap, v.NullBitmapBytes()) {
			// the parent validity bitmap takes priority over the child's,
			// so print a masked view of the child.
			masked := a.newStructFieldWithParentValidityMask(i)
			fmt.Fprintf(o, "%v", masked)
			masked.Release()
			continue
		}
		fmt.Fprintf(o, "%v", v)
	}
	o.WriteString("}")
	return o.String()
}
+
// newStructFieldWithParentValidityMask returns the Interface at fieldIndex
// with a nullBitmapBytes adjusted according on the parent struct nullBitmapBytes.
// From the docs:
//
//	"When reading the struct array the parent validity bitmap takes priority."
//
// The returned array must be Release()'d by the caller.
func (a *Struct) newStructFieldWithParentValidityMask(fieldIndex int) arrow.Array {
	field := a.Field(fieldIndex)
	nullBitmapBytes := field.NullBitmapBytes()
	maskedNullBitmapBytes := make([]byte, len(nullBitmapBytes))
	copy(maskedNullBitmapBytes, nullBitmapBytes)
	// clear the child's validity bit wherever the parent is null.
	for i := 0; i < field.Len(); i++ {
		if a.IsNull(i) {
			bitutil.ClearBit(maskedNullBitmapBytes, i)
		}
	}
	data := NewSliceData(field.Data(), 0, int64(field.Len())).(*Data)
	defer data.Release()
	bufs := make([]*memory.Buffer, len(data.Buffers()))
	copy(bufs, data.buffers)
	// swap the validity buffer for the masked copy.
	// NOTE(review): assumes bufs[0] tolerates Release when it is nil — confirm
	// memory.Buffer.Release is nil-safe, or that a bitmap is always present here.
	bufs[0].Release()
	bufs[0] = memory.NewBufferBytes(maskedNullBitmapBytes)
	data.buffers = bufs
	maskedField := MakeFromData(data)
	return maskedField
}
+
// setData materializes one child array per field, slicing each child to the
// struct's own offset/length when they differ from the child's view.
func (a *Struct) setData(data *Data) {
	a.array.setData(data)
	a.fields = make([]arrow.Array, len(data.childData))
	for i, child := range data.childData {
		if data.offset != 0 || child.Len() != data.length {
			sub := NewSliceData(child, int64(data.offset), int64(data.offset+data.length))
			a.fields[i] = MakeFromData(sub)
			sub.Release()
		} else {
			a.fields[i] = MakeFromData(child)
		}
	}
}
+
+func (a *Struct) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+
+ tmp := make(map[string]interface{})
+ fieldList := a.data.dtype.(*arrow.StructType).Fields()
+ for j, d := range a.fields {
+ tmp[fieldList[j].Name] = d.GetOneForMarshal(i)
+ }
+ return tmp
+}
+
// MarshalJSON implements json.Marshaler, encoding the array as a JSON array
// of objects (null rows encode as JSON null).
func (a *Struct) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)

	buf.WriteByte('[')
	for i := 0; i < a.Len(); i++ {
		if i != 0 {
			buf.WriteByte(',')
		}
		// Encode appends a trailing newline after each element; the output
		// remains valid JSON.
		if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
			return nil, err
		}
	}
	buf.WriteByte(']')
	return buf.Bytes(), nil
}
+
+func arrayEqualStruct(left, right *Struct) bool {
+ for i, lf := range left.fields {
+ rf := right.fields[i]
+ if !Equal(lf, rf) {
+ return false
+ }
+ }
+ return true
+}
+
// Retain increases the reference count of the array and all child arrays by 1.
func (a *Struct) Retain() {
	a.array.Retain()
	for _, f := range a.fields {
		f.Retain()
	}
}

// Release decreases the reference count of the array and all child arrays by 1.
// When the reference count goes to zero, the memory is freed.
func (a *Struct) Release() {
	a.array.Release()
	for _, f := range a.fields {
		f.Release()
	}
}
+
// StructBuilder builds Struct arrays; it maintains one child builder per
// field plus its own validity bitmap.
type StructBuilder struct {
	builder

	dtype  arrow.DataType // the declared *arrow.StructType
	fields []Builder      // one child builder per struct field
}

// NewStructBuilder returns a builder, using the provided memory allocator.
func NewStructBuilder(mem memory.Allocator, dtype *arrow.StructType) *StructBuilder {
	b := &StructBuilder{
		builder: builder{refCount: 1, mem: mem},
		dtype:   dtype,
		fields:  make([]Builder, len(dtype.Fields())),
	}
	for i, f := range dtype.Fields() {
		b.fields[i] = NewBuilder(b.mem, f.Type)
	}
	return b
}
+
+func (b *StructBuilder) Type() arrow.DataType {
+ fields := make([]arrow.Field, len(b.fields))
+ copy(fields, b.dtype.(*arrow.StructType).Fields())
+ for i, b := range b.fields {
+ fields[i].Type = b.Type()
+ }
+ return arrow.StructOf(fields...)
+}
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *StructBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}

		// release all child builders as well.
		for _, f := range b.fields {
			f.Release()
		}
	}
}
+
// Append appends a validity marker for one struct row. When v is false the
// row is null and a null is appended to every child builder; when v is true
// the caller is expected to append a value to each child builder itself.
func (b *StructBuilder) Append(v bool) {
	// Intentionally not calling `Reserve` as it will recursively call
	// `Reserve` on the child builders, which during profiling has shown to be
	// very expensive due to iterating over children, dynamic dispatch and all
	// other code that gets executed even if previously `Reserve` was called to
	// preallocate. Not calling `Reserve` has no downsides as when appending to
	// the underlying children they already ensure they have enough space
	// reserved. The only thing we must do is ensure we have enough space in
	// the validity bitmap of the struct builder itself.
	b.builder.reserve(1, b.resizeHelper)
	b.unsafeAppendBoolToBitmap(v)
	if !v {
		for _, f := range b.fields {
			f.AppendNull()
		}
	}
}
+
// AppendValues appends validity markers for len(valids) rows; child builders
// are not touched and must be filled by the caller.
func (b *StructBuilder) AppendValues(valids []bool) {
	b.Reserve(len(valids))
	b.builder.unsafeAppendBoolsToBitmap(valids, len(valids))
}

// AppendNull appends a null struct row (and nulls to all child builders).
func (b *StructBuilder) AppendNull() { b.Append(false) }

// AppendNulls appends n null struct rows.
func (b *StructBuilder) AppendNulls(n int) {
	for i := 0; i < n; i++ {
		b.AppendNull()
	}
}

// AppendEmptyValue appends a valid row whose fields all hold their empty value.
func (b *StructBuilder) AppendEmptyValue() {
	b.Append(true)
	for _, f := range b.fields {
		f.AppendEmptyValue()
	}
}

// AppendEmptyValues appends n empty (valid) rows.
func (b *StructBuilder) AppendEmptyValues(n int) {
	for i := 0; i < n; i++ {
		b.AppendEmptyValue()
	}
}

// unsafeAppendBoolToBitmap records validity for one row; the caller must have
// reserved space in the bitmap beforehand.
func (b *StructBuilder) unsafeAppendBoolToBitmap(isValid bool) {
	if isValid {
		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
	} else {
		b.nulls++
	}
	b.length++
}
+
// init allocates the validity bitmap for capacity elements.
func (b *StructBuilder) init(capacity int) {
	b.builder.init(capacity)
}

// Reserve ensures there is enough space for appending n elements
// by checking the capacity and calling Resize if necessary.
func (b *StructBuilder) Reserve(n int) {
	b.builder.reserve(n, b.resizeHelper)
	// children are reserved too so subsequent appends don't reallocate.
	for _, f := range b.fields {
		f.Reserve(n)
	}
}

// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
func (b *StructBuilder) Resize(n int) {
	b.resizeHelper(n)
	for _, f := range b.fields {
		f.Resize(n)
	}
}

// resizeHelper resizes only the struct builder's own bitmap (not children),
// enforcing the minimum builder capacity.
func (b *StructBuilder) resizeHelper(n int) {
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		b.init(n)
	} else {
		b.builder.resize(n, b.builder.init)
	}
}
+
// NumField returns the number of child field builders.
func (b *StructBuilder) NumField() int { return len(b.fields) }

// FieldBuilder returns the builder for the i-th field.
func (b *StructBuilder) FieldBuilder(i int) Builder { return b.fields[i] }

// NewArray creates a Struct array from the memory buffers used by the builder and resets the StructBuilder
// so it can be used to build a new array.
func (b *StructBuilder) NewArray() arrow.Array {
	return b.NewStructArray()
}

// NewStructArray creates a Struct array from the memory buffers used by the builder and resets the StructBuilder
// so it can be used to build a new array.
func (b *StructBuilder) NewStructArray() (a *Struct) {
	data := b.newData()
	a = NewStructData(data)
	// the array retains data; release the builder's reference.
	data.Release()
	return
}
+
// newData finalizes all child builders into ArrayData, assembles them with
// the validity bitmap into a single Data, and resets the builder.
func (b *StructBuilder) newData() (data *Data) {
	fields := make([]arrow.ArrayData, len(b.fields))
	for i, f := range b.fields {
		arr := f.NewArray()
		// deferred releases run after NewData has retained the child data.
		defer arr.Release()
		fields[i] = arr.Data()
	}

	data = NewData(
		b.Type(), b.length,
		[]*memory.Buffer{
			b.nullBitmap,
		},
		fields,
		b.nulls,
		0,
	)
	b.reset()

	return
}
+
+func (b *StructBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+
+ if !strings.HasPrefix(s, "{") && !strings.HasSuffix(s, "}") {
+ return fmt.Errorf("%w: invalid string for struct should be be of form: {*}", arrow.ErrInvalid)
+ }
+ dec := json.NewDecoder(strings.NewReader(s))
+ return b.UnmarshalOne(dec)
+}
+
+func (b *StructBuilder) UnmarshalOne(dec *json.Decoder) error {
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ switch t {
+ case json.Delim('{'):
+ b.Append(true)
+ keylist := make(map[string]bool)
+ for dec.More() {
+ keyTok, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ key, ok := keyTok.(string)
+ if !ok {
+ return errors.New("missing key")
+ }
+
+ if keylist[key] {
+ return fmt.Errorf("key %s is specified twice", key)
+ }
+
+ keylist[key] = true
+
+ idx, ok := b.dtype.(*arrow.StructType).FieldIdx(key)
+ if !ok {
+ var extra interface{}
+ dec.Decode(&extra)
+ continue
+ }
+
+ if err := b.fields[idx].UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+
+ // Append null values to all optional fields that were not presented in the json input
+ for _, field := range b.dtype.(*arrow.StructType).Fields() {
+ if !field.Nullable {
+ continue
+ }
+ idx, _ := b.dtype.(*arrow.StructType).FieldIdx(field.Name)
+ if _, hasKey := keylist[field.Name]; !hasKey {
+ b.fields[idx].AppendNull()
+ }
+ }
+
+ // consume '}'
+ _, err := dec.Token()
+ return err
+ case nil:
+ b.AppendNull()
+ default:
+ return &json.UnmarshalTypeError{
+ Offset: dec.InputOffset(),
+ Struct: fmt.Sprint(b.dtype),
+ }
+ }
+ return nil
+}
+
+func (b *StructBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *StructBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("struct builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
var (
	// compile-time interface conformance checks.
	_ arrow.Array = (*Struct)(nil)
	_ Builder     = (*StructBuilder)(nil)
)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go
new file mode 100644
index 000000000..6456992e3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go
@@ -0,0 +1,421 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+// NewColumnSlice returns a new zero-copy slice of the column with the indicated
+// indices i and j, corresponding to the column's array[i:j].
+// The returned column must be Release()'d after use.
+//
+// NewColSlice panics if the slice is outside the valid range of the column's array.
+// NewColSlice panics if j < i.
+func NewColumnSlice(col *arrow.Column, i, j int64) *arrow.Column {
+ slice := NewChunkedSlice(col.Data(), i, j)
+ defer slice.Release()
+ return arrow.NewColumn(col.Field(), slice)
+}
+
+// NewChunkedSlice constructs a zero-copy slice of the chunked array with the indicated
+// indices i and j, corresponding to array[i:j].
+// The returned chunked array must be Release()'d after use.
+//
+// NewSlice panics if the slice is outside the valid range of the input array.
+// NewSlice panics if j < i.
// NewChunkedSlice constructs a zero-copy slice of the chunked array with the indicated
// indices i and j, corresponding to array[i:j].
// The returned chunked array must be Release()'d after use.
//
// NewChunkedSlice panics if the slice is outside the valid range of the input array.
// NewChunkedSlice panics if j < i.
func NewChunkedSlice(a *arrow.Chunked, i, j int64) *arrow.Chunked {
	if j > int64(a.Len()) || i > j || i > int64(a.Len()) {
		panic("arrow/array: index out of range")
	}

	var (
		cur    = 0     // index of the chunk currently being examined
		beg    = i     // offset of the slice start, relative to the current chunk
		sz     = j - i // number of elements still to be covered by the result
		chunks = make([]arrow.Array, 0, len(a.Chunks()))
	)

	// Skip whole chunks that lie entirely before the requested range,
	// translating beg into an offset within the first overlapping chunk.
	for cur < len(a.Chunks()) && beg >= int64(a.Chunks()[cur].Len()) {
		beg -= int64(a.Chunks()[cur].Len())
		cur++
	}

	// Take a (possibly partial) zero-copy slice of each chunk until the
	// requested number of elements has been accumulated.
	for cur < len(a.Chunks()) && sz > 0 {
		arr := a.Chunks()[cur]
		end := beg + sz
		if end > int64(arr.Len()) {
			end = int64(arr.Len())
		}
		chunks = append(chunks, NewSlice(arr, beg, end))
		sz -= int64(arr.Len()) - beg
		beg = 0
		cur++
	}
	// Clamp capacity so callers appending to the result cannot stomp it.
	chunks = chunks[:len(chunks):len(chunks)]
	// arrow.NewChunked retains each chunk, so release the references taken
	// by NewSlice above once the Chunked has been constructed.
	defer func() {
		for _, chunk := range chunks {
			chunk.Release()
		}
	}()

	return arrow.NewChunked(a.DataType(), chunks)
}
+
// simpleTable is a basic, non-lazy in-memory table.
type simpleTable struct {
	refCount int64 // number of active references; updated atomically

	rows int64          // number of rows exposed by the table
	cols []arrow.Column // one column per schema field

	schema *arrow.Schema
}
+
// NewTable returns a new basic, non-lazy in-memory table.
// If rows is negative, the number of rows will be inferred from the height
// of the columns.
//
// NewTable panics if the columns and schema are inconsistent.
// NewTable panics if rows is larger than the height of the columns.
func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) *simpleTable {
	tbl := simpleTable{
		refCount: 1,
		rows:     rows,
		cols:     cols,
		schema:   schema,
	}

	// A negative row count means "infer": use the first column's height,
	// or zero when there are no columns at all.
	if tbl.rows < 0 {
		switch len(tbl.cols) {
		case 0:
			tbl.rows = 0
		default:
			tbl.rows = int64(tbl.cols[0].Len())
		}
	}

	// validate the table and its constituents.
	// note we retain the columns after having validated the table
	// in case the validation fails and panics (and would otherwise leak
	// a ref-count on the columns.)
	tbl.validate()

	for i := range tbl.cols {
		tbl.cols[i].Retain()
	}

	return &tbl
}
+
+// NewTableFromSlice is a convenience function to create a table from a slice
+// of slices of arrow.Array.
+//
+// Like other NewTable functions this can panic if:
+// - len(schema.Fields) != len(data)
+// - the total length of each column's array slice (ie: number of rows
+// in the column) aren't the same for all columns.
+func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable {
+ if len(data) != len(schema.Fields()) {
+ panic("array/table: mismatch in number of columns and data for creating a table")
+ }
+
+ cols := make([]arrow.Column, len(schema.Fields()))
+ for i, arrs := range data {
+ field := schema.Field(i)
+ chunked := arrow.NewChunked(field.Type, arrs)
+ cols[i] = *arrow.NewColumn(field, chunked)
+ chunked.Release()
+ }
+
+ tbl := simpleTable{
+ refCount: 1,
+ schema: schema,
+ cols: cols,
+ rows: int64(cols[0].Len()),
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ // if validate panics, let's release the columns
+ // so that we don't leak them, then propagate the panic
+ for _, c := range cols {
+ c.Release()
+ }
+ panic(r)
+ }
+ }()
+ // validate the table and its constituents.
+ tbl.validate()
+
+ return &tbl
+}
+
+// NewTableFromRecords returns a new basic, non-lazy in-memory table.
+//
+// NewTableFromRecords panics if the records and schema are inconsistent.
+func NewTableFromRecords(schema *arrow.Schema, recs []arrow.Record) *simpleTable {
+ arrs := make([]arrow.Array, len(recs))
+ cols := make([]arrow.Column, len(schema.Fields()))
+
+ defer func(cols []arrow.Column) {
+ for i := range cols {
+ cols[i].Release()
+ }
+ }(cols)
+
+ for i := range cols {
+ field := schema.Field(i)
+ for j, rec := range recs {
+ arrs[j] = rec.Column(i)
+ }
+ chunk := arrow.NewChunked(field.Type, arrs)
+ cols[i] = *arrow.NewColumn(field, chunk)
+ chunk.Release()
+ }
+
+ return NewTable(schema, cols, -1)
+}
+
+func (tbl *simpleTable) Schema() *arrow.Schema { return tbl.schema }
+
+func (tbl *simpleTable) AddColumn(i int, field arrow.Field, column arrow.Column) (arrow.Table, error) {
+ if int64(column.Len()) != tbl.rows {
+ return nil, fmt.Errorf("arrow/array: column length mismatch: %d != %d", column.Len(), tbl.rows)
+ }
+ if field.Type != column.DataType() {
+ return nil, fmt.Errorf("arrow/array: column type mismatch: %v != %v", field.Type, column.DataType())
+ }
+ newSchema, err := tbl.schema.AddField(i, field)
+ if err != nil {
+ return nil, err
+ }
+ cols := make([]arrow.Column, len(tbl.cols)+1)
+ copy(cols[:i], tbl.cols[:i])
+ cols[i] = column
+ copy(cols[i+1:], tbl.cols[i:])
+ newTable := NewTable(newSchema, cols, tbl.rows)
+ return newTable, nil
+}
+
+func (tbl *simpleTable) NumRows() int64 { return tbl.rows }
+func (tbl *simpleTable) NumCols() int64 { return int64(len(tbl.cols)) }
+func (tbl *simpleTable) Column(i int) *arrow.Column { return &tbl.cols[i] }
+
+func (tbl *simpleTable) validate() {
+ if len(tbl.cols) != len(tbl.schema.Fields()) {
+ panic(errors.New("arrow/array: table schema mismatch"))
+ }
+ for i, col := range tbl.cols {
+ if !col.Field().Equal(tbl.schema.Field(i)) {
+ panic(fmt.Errorf("arrow/array: column field %q is inconsistent with schema", col.Name()))
+ }
+
+ if int64(col.Len()) < tbl.rows {
+ panic(fmt.Errorf("arrow/array: column %q expected length >= %d but got length %d", col.Name(), tbl.rows, col.Len()))
+ }
+ }
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (tbl *simpleTable) Retain() {
+ atomic.AddInt64(&tbl.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (tbl *simpleTable) Release() {
+ debug.Assert(atomic.LoadInt64(&tbl.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&tbl.refCount, -1) == 0 {
+ for i := range tbl.cols {
+ tbl.cols[i].Release()
+ }
+ tbl.cols = nil
+ }
+}
+
+func (tbl *simpleTable) String() string {
+ o := new(strings.Builder)
+ o.WriteString(tbl.Schema().String())
+ o.WriteString("\n")
+
+ for i := 0; i < int(tbl.NumCols()); i++ {
+ col := tbl.Column(i)
+ o.WriteString(col.Field().Name + ": [")
+ for j, chunk := range col.Data().Chunks() {
+ if j != 0 {
+ o.WriteString(", ")
+ }
+ o.WriteString(chunk.String())
+ }
+ o.WriteString("]\n")
+ }
+ return o.String()
+}
+
// TableReader is a Record iterator over a (possibly chunked) Table.
type TableReader struct {
	refCount int64 // number of active references; updated atomically

	tbl arrow.Table
	cur int64        // current row
	max int64        // total number of rows
	rec arrow.Record // current Record
	chksz int64      // chunk size

	chunks  []*arrow.Chunked
	slots   []int   // per-column index of the chunk currently being consumed
	offsets []int64 // per-column offset into the current chunk
}
+
+// NewTableReader returns a new TableReader to iterate over the (possibly chunked) Table.
+// if chunkSize is <= 0, the biggest possible chunk will be selected.
+func NewTableReader(tbl arrow.Table, chunkSize int64) *TableReader {
+ ncols := tbl.NumCols()
+ tr := &TableReader{
+ refCount: 1,
+ tbl: tbl,
+ cur: 0,
+ max: int64(tbl.NumRows()),
+ chksz: chunkSize,
+ chunks: make([]*arrow.Chunked, ncols),
+ slots: make([]int, ncols),
+ offsets: make([]int64, ncols),
+ }
+ tr.tbl.Retain()
+
+ if tr.chksz <= 0 {
+ tr.chksz = math.MaxInt64
+ }
+
+ for i := range tr.chunks {
+ col := tr.tbl.Column(i)
+ tr.chunks[i] = col.Data()
+ tr.chunks[i].Retain()
+ }
+ return tr
+}
+
+func (tr *TableReader) Schema() *arrow.Schema { return tr.tbl.Schema() }
+func (tr *TableReader) Record() arrow.Record { return tr.rec }
+
// Next advances the reader to the next Record, returning false once all rows
// have been consumed. The produced Record is available via Record() and is
// owned by the reader until the next call to Next or Release.
func (tr *TableReader) Next() bool {
	if tr.cur >= tr.max {
		return false
	}

	// drop the previously produced record before building the next one.
	if tr.rec != nil {
		tr.rec.Release()
	}

	// determine the minimum contiguous slice across all columns
	chunksz := imin64(tr.max, tr.chksz)
	chunks := make([]arrow.Array, len(tr.chunks))
	for i := range chunks {
		j := tr.slots[i]
		chunk := tr.chunks[i].Chunk(j)
		remain := int64(chunk.Len()) - tr.offsets[i]
		if remain < chunksz {
			chunksz = remain
		}

		chunks[i] = chunk
	}

	// slice the chunks, advance each chunk slot as appropriate.
	batch := make([]arrow.Array, len(tr.chunks))
	for i, chunk := range chunks {
		var slice arrow.Array
		offset := tr.offsets[i]
		switch int64(chunk.Len()) - offset {
		case chunksz:
			// the batch consumes this chunk exactly; move to the next chunk.
			tr.slots[i]++
			tr.offsets[i] = 0
			if offset > 0 {
				// need to slice
				slice = NewSlice(chunk, offset, offset+chunksz)
			} else {
				// no need to slice
				slice = chunk
				slice.Retain()
			}
		default:
			// the chunk has rows left over; stay on it and advance the offset.
			tr.offsets[i] += chunksz
			slice = NewSlice(chunk, offset, offset+chunksz)
		}
		batch[i] = slice
	}

	tr.cur += chunksz
	tr.rec = NewRecord(tr.tbl.Schema(), batch, chunksz)

	// NewRecord retains the arrays; release the references taken above.
	for _, arr := range batch {
		arr.Release()
	}

	return true
}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (tr *TableReader) Retain() {
+ atomic.AddInt64(&tr.refCount, 1)
+}
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Release may be called simultaneously from multiple goroutines.
func (tr *TableReader) Release() {
	debug.Assert(atomic.LoadInt64(&tr.refCount) > 0, "too many releases")

	if atomic.AddInt64(&tr.refCount, -1) == 0 {
		// drop every reference held by the reader: the table, each
		// column's chunked data, and the in-flight record (if any).
		tr.tbl.Release()
		for _, chk := range tr.chunks {
			chk.Release()
		}
		if tr.rec != nil {
			tr.rec.Release()
		}
		// nil out the fields so any use-after-release fails loudly.
		tr.tbl = nil
		tr.chunks = nil
		tr.slots = nil
		tr.offsets = nil
	}
}
+func (tr *TableReader) Err() error { return nil }
+
// imin64 returns the smaller of a and b.
func imin64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
+
+var (
+ _ arrow.Table = (*simpleTable)(nil)
+ _ RecordReader = (*TableReader)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go
new file mode 100644
index 000000000..2928b1fc7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go
@@ -0,0 +1,381 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Timestamp represents an immutable sequence of arrow.Timestamp values.
+type Timestamp struct {
+ array
+ values []arrow.Timestamp
+}
+
+// NewTimestampData creates a new Timestamp from Data.
+func NewTimestampData(data arrow.ArrayData) *Timestamp {
+ a := &Timestamp{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// Reset resets the array for re-use.
+func (a *Timestamp) Reset(data *Data) {
+ a.setData(data)
+}
+
+// Value returns the value at the specified index.
+func (a *Timestamp) Value(i int) arrow.Timestamp { return a.values[i] }
+
+// TimestampValues returns the values.
+func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.values }
+
+// String returns a string representation of the array.
+func (a *Timestamp) String() string {
+ o := new(strings.Builder)
+ o.WriteString("[")
+ for i, v := range a.values {
+ if i > 0 {
+ fmt.Fprintf(o, " ")
+ }
+ switch {
+ case a.IsNull(i):
+ o.WriteString(NullValueStr)
+ default:
+ fmt.Fprintf(o, "%v", v)
+ }
+ }
+ o.WriteString("]")
+ return o.String()
+}
+
// setData installs data into the array, reinterpreting the value buffer as a
// typed slice and narrowing it to this array's offset/length window.
func (a *Timestamp) setData(data *Data) {
	a.array.setData(data)
	vals := data.buffers[1]
	if vals != nil {
		a.values = arrow.TimestampTraits.CastFromBytes(vals.Bytes())
		// restrict the view to this array's slice of the (possibly shared) buffer
		beg := a.array.data.offset
		end := beg + a.array.data.length
		a.values = a.values[beg:end]
	}
}
+
// ValueStr returns the value at index i formatted in the type's time zone,
// or NullValueStr for nulls.
func (a *Timestamp) ValueStr(i int) string {
	if a.IsNull(i) {
		return NullValueStr
	}

	dt := a.DataType().(*arrow.TimestampType)
	// NOTE(review): GetZone's error is discarded here, leaving z possibly
	// nil/zero on a bad time zone — presumably the type was validated at
	// construction; confirm against arrow.TimestampType.GetZone.
	z, _ := dt.GetZone()
	return a.values[i].ToTime(dt.Unit).In(z).Format("2006-01-02 15:04:05.999999999Z0700")
}
+
+func (a *Timestamp) GetOneForMarshal(i int) interface{} {
+ if a.IsNull(i) {
+ return nil
+ }
+ return a.values[i].ToTime(a.DataType().(*arrow.TimestampType).Unit).Format("2006-01-02 15:04:05.999999999")
+}
+
+func (a *Timestamp) MarshalJSON() ([]byte, error) {
+ vals := make([]interface{}, a.Len())
+ for i := range a.values {
+ vals[i] = a.GetOneForMarshal(i)
+ }
+
+ return json.Marshal(vals)
+}
+
+func arrayEqualTimestamp(left, right *Timestamp) bool {
+ for i := 0; i < left.Len(); i++ {
+ if left.IsNull(i) {
+ continue
+ }
+ if left.Value(i) != right.Value(i) {
+ return false
+ }
+ }
+ return true
+}
+
+type TimestampBuilder struct {
+ builder
+
+ dtype *arrow.TimestampType
+ data *memory.Buffer
+ rawData []arrow.Timestamp
+}
+
+func NewTimestampBuilder(mem memory.Allocator, dtype *arrow.TimestampType) *TimestampBuilder {
+ return &TimestampBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype}
+}
+
+func (b *TimestampBuilder) Type() arrow.DataType { return b.dtype }
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
func (b *TimestampBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		// free both the validity bitmap and the value buffer.
		if b.nullBitmap != nil {
			b.nullBitmap.Release()
			b.nullBitmap = nil
		}
		if b.data != nil {
			b.data.Release()
			b.data = nil
			b.rawData = nil
		}
	}
}
+
+func (b *TimestampBuilder) AppendTime(t time.Time) {
+ ts, err := arrow.TimestampFromTime(t, b.dtype.Unit)
+ if err != nil {
+ panic(err)
+ }
+ b.Append(ts)
+}
+
+func (b *TimestampBuilder) Append(v arrow.Timestamp) {
+ b.Reserve(1)
+ b.UnsafeAppend(v)
+}
+
+func (b *TimestampBuilder) AppendNull() {
+ b.Reserve(1)
+ b.UnsafeAppendBoolToBitmap(false)
+}
+
+func (b *TimestampBuilder) AppendNulls(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendNull()
+ }
+}
+
+func (b *TimestampBuilder) AppendEmptyValue() {
+ b.Append(0)
+}
+
+func (b *TimestampBuilder) AppendEmptyValues(n int) {
+ for i := 0; i < n; i++ {
+ b.AppendEmptyValue()
+ }
+}
+
+func (b *TimestampBuilder) UnsafeAppend(v arrow.Timestamp) {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ b.rawData[b.length] = v
+ b.length++
+}
+
+func (b *TimestampBuilder) UnsafeAppendBoolToBitmap(isValid bool) {
+ if isValid {
+ bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+ } else {
+ b.nulls++
+ }
+ b.length++
+}
+
// AppendValues will append the values in the v slice. The valid slice determines which values
// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty,
// all values in v are appended and considered valid.
func (b *TimestampBuilder) AppendValues(v []arrow.Timestamp, valid []bool) {
	if len(v) != len(valid) && len(valid) != 0 {
		panic("len(v) != len(valid) && len(valid) != 0")
	}

	if len(v) == 0 {
		return
	}

	b.Reserve(len(v))
	// bulk-copy the values, then set the corresponding validity bits.
	arrow.TimestampTraits.Copy(b.rawData[b.length:], v)
	b.builder.unsafeAppendBoolsToBitmap(valid, len(v))
}
+
+func (b *TimestampBuilder) init(capacity int) {
+ b.builder.init(capacity)
+
+ b.data = memory.NewResizableBuffer(b.mem)
+ bytesN := arrow.TimestampTraits.BytesRequired(capacity)
+ b.data.Resize(bytesN)
+ b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes())
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *TimestampBuilder) Reserve(n int) {
+ b.builder.reserve(n, b.Resize)
+}
+
// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
// additional memory will be allocated. If n is smaller, the allocated memory may reduced.
func (b *TimestampBuilder) Resize(n int) {
	nBuilder := n
	// never allocate below the builder's minimum capacity.
	if n < minBuilderCapacity {
		n = minBuilderCapacity
	}

	if b.capacity == 0 {
		// first allocation goes through init.
		b.init(n)
	} else {
		// resize the shared builder state with the caller's requested size,
		// and the value buffer with the clamped size.
		b.builder.resize(nBuilder, b.init)
		b.data.Resize(arrow.TimestampTraits.BytesRequired(n))
		b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes())
	}
}
+
+// NewArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder
+// so it can be used to build a new array.
+func (b *TimestampBuilder) NewArray() arrow.Array {
+ return b.NewTimestampArray()
+}
+
+// NewTimestampArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder
+// so it can be used to build a new array.
+func (b *TimestampBuilder) NewTimestampArray() (a *Timestamp) {
+ data := b.newData()
+ a = NewTimestampData(data)
+ data.Release()
+ return
+}
+
// newData transfers the builder's buffers into a freshly created Data object
// and resets the builder for reuse. The caller owns the returned Data.
func (b *TimestampBuilder) newData() (data *Data) {
	bytesRequired := arrow.TimestampTraits.BytesRequired(b.length)
	if bytesRequired > 0 && bytesRequired < b.data.Len() {
		// trim buffers
		b.data.Resize(bytesRequired)
	}
	// NewData retains the buffers, so the builder can drop its references below.
	data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0)
	b.reset()

	if b.data != nil {
		b.data.Release()
		b.data = nil
		b.rawData = nil
	}

	return
}
+
// AppendValueFromString parses s as a timestamp in the builder's unit and
// time zone and appends it. The NullValueStr sentinel appends a null.
// On a parse failure a null is appended AND the error is returned.
func (b *TimestampBuilder) AppendValueFromString(s string) error {
	if s == NullValueStr {
		b.AppendNull()
		return nil
	}

	loc, err := b.dtype.GetZone()
	if err != nil {
		return err
	}

	v, _, err := arrow.TimestampFromStringInLocation(s, b.dtype.Unit, loc)
	if err != nil {
		// keep the builder's length consistent with the number of inputs
		// even when parsing fails.
		b.AppendNull()
		return err
	}
	b.Append(v)
	return nil
}
+
// UnmarshalOne decodes a single JSON token from dec and appends it to the
// builder: null appends a null, a string is parsed as a timestamp in the
// builder's unit/zone, and a number is taken as a raw timestamp value.
func (b *TimestampBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch v := t.(type) {
	case nil:
		b.AppendNull()
	case string:
		// NOTE(review): GetZone's error is discarded; loc may be its zero
		// value if the type carries an invalid zone — confirm upstream.
		loc, _ := b.dtype.GetZone()
		tm, _, err := arrow.TimestampFromStringInLocation(v, b.dtype.Unit, loc)
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v,
				Type:   reflect.TypeOf(arrow.Timestamp(0)),
				Offset: dec.InputOffset(),
			}
		}

		b.Append(tm)
	case json.Number:
		n, err := v.Int64()
		if err != nil {
			return &json.UnmarshalTypeError{
				Value:  v.String(),
				Type:   reflect.TypeOf(arrow.Timestamp(0)),
				Offset: dec.InputOffset(),
			}
		}
		b.Append(arrow.Timestamp(n))
	case float64:
		// decoder not in UseNumber mode: accept the float representation.
		b.Append(arrow.Timestamp(v))

	default:
		return &json.UnmarshalTypeError{
			Value:  fmt.Sprint(t),
			Type:   reflect.TypeOf(arrow.Timestamp(0)),
			Offset: dec.InputOffset(),
		}
	}

	return nil
}
+
+func (b *TimestampBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *TimestampBuilder) UnmarshalJSON(data []byte) error {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("binary builder must unpack from json array, found %s", delim)
+ }
+
+ return b.Unmarshal(dec)
+}
+
+var (
+ _ arrow.Array = (*Timestamp)(nil)
+ _ Builder = (*TimestampBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go
new file mode 100644
index 000000000..869355ac7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go
@@ -0,0 +1,1370 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/bitutils"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+// Union is a convenience interface to encompass both Sparse and Dense
+// union array types.
+type Union interface {
+ arrow.Array
+ // NumFields returns the number of child fields in this union.
+ // Equivalent to len(UnionType().Fields())
+ NumFields() int
+ // Validate returns an error if there are any issues with the lengths
+ // or types of the children arrays mismatching with the Type of the
+ // Union Array. nil is returned if there are no problems.
+ Validate() error
+ // ValidateFull runs the same checks that Validate() does, but additionally
+ // checks that all childIDs are valid (>= 0 || ==InvalidID) and for
+ // dense unions validates that all offsets are within the bounds of their
+ // respective child.
+ ValidateFull() error
+ // TypeCodes returns the type id buffer for the union Array, equivalent to
+ // Data().Buffers()[1]. Note: This will not account for any slice offset.
+ TypeCodes() *memory.Buffer
+ // RawTypeCodes returns a slice of UnionTypeCodes properly accounting for
+ // any slice offset.
+ RawTypeCodes() []arrow.UnionTypeCode
+ // TypeCode returns the logical type code of the value at the requested index
+ TypeCode(i int) arrow.UnionTypeCode
+ // ChildID returns the index of the physical child containing the value
+ // at the requested index. Equivalent to:
+ //
+ // arr.UnionType().ChildIDs()[arr.RawTypeCodes()[i+arr.Data().Offset()]]
+ ChildID(i int) int
+ // UnionType is a convenience function to retrieve the properly typed UnionType
+ // instead of having to call DataType() and manually assert the type.
+ UnionType() arrow.UnionType
+ // Mode returns the union mode of the underlying Array, either arrow.SparseMode
+ // or arrow.DenseMode.
+ Mode() arrow.UnionMode
+ // Field returns the requested child array for this union. Returns nil if a
+ // non-existent position is passed in.
+ //
+ // The appropriate child for an index can be retrieved with Field(ChildID(index))
+ Field(pos int) arrow.Array
+}
+
+const kMaxElems = math.MaxInt32
+
// union is the shared implementation embedded by the sparse and dense
// union array types.
type union struct {
	array

	unionType arrow.UnionType
	// typecodes is the raw int8 type-id buffer; it is NOT adjusted for any
	// slice offset (see RawTypeCodes / TypeCode).
	typecodes []arrow.UnionTypeCode

	children []arrow.Array // one child array per union field
}
+
+func (a *union) Retain() {
+ a.array.Retain()
+ for _, c := range a.children {
+ c.Retain()
+ }
+}
+
+func (a *union) Release() {
+ a.array.Release()
+ for _, c := range a.children {
+ c.Release()
+ }
+}
+
+func (a *union) NumFields() int { return len(a.unionType.Fields()) }
+
+func (a *union) Mode() arrow.UnionMode { return a.unionType.Mode() }
+
+func (a *union) UnionType() arrow.UnionType { return a.unionType }
+
+func (a *union) TypeCodes() *memory.Buffer {
+ return a.data.buffers[1]
+}
+
+func (a *union) RawTypeCodes() []arrow.UnionTypeCode {
+ if a.data.length > 0 {
+ return a.typecodes[a.data.offset:]
+ }
+ return []arrow.UnionTypeCode{}
+}
+
+func (a *union) TypeCode(i int) arrow.UnionTypeCode {
+ return a.typecodes[i+a.data.offset]
+}
+
+func (a *union) ChildID(i int) int {
+ return a.unionType.ChildIDs()[a.typecodes[i+a.data.offset]]
+}
+
// setData installs data into the union, reinterpreting the type-id buffer
// and materializing one child Array per child data.
func (a *union) setData(data *Data) {
	a.unionType = data.dtype.(arrow.UnionType)
	debug.Assert(len(data.buffers) >= 2, "arrow/array: invalid number of union array buffers")

	if data.length > 0 {
		a.typecodes = arrow.Int8Traits.CastFromBytes(data.buffers[1].Bytes())
	} else {
		a.typecodes = []int8{}
	}
	a.children = make([]arrow.Array, len(data.childData))
	for i, child := range data.childData {
		// sparse-union children must line up 1:1 with the parent, so a
		// sliced parent requires slicing each child to the same window.
		if a.unionType.Mode() == arrow.SparseMode && (data.offset != 0 || child.Len() != data.length) {
			child = NewSliceData(child, int64(data.offset), int64(data.offset+data.length))
			defer child.Release()
		}
		a.children[i] = MakeFromData(child)
	}
	a.array.setData(data)
}
+
+func (a *union) Field(pos int) (result arrow.Array) {
+ if pos < 0 || pos >= len(a.children) {
+ return nil
+ }
+
+ return a.children[pos]
+}
+
// Validate checks that each child's length (for sparse unions) and data type
// match the union type's fields, returning the first inconsistency found.
func (a *union) Validate() error {
	fields := a.unionType.Fields()
	for i, f := range fields {
		fieldData := a.data.childData[i]
		// sparse children must cover the parent's full logical window.
		if a.unionType.Mode() == arrow.SparseMode && fieldData.Len() < a.data.length+a.data.offset {
			return fmt.Errorf("arrow/array: sparse union child array #%d has length smaller than expected for union array (%d < %d)",
				i, fieldData.Len(), a.data.length+a.data.offset)
		}

		if !arrow.TypeEqual(f.Type, fieldData.DataType()) {
			return fmt.Errorf("arrow/array: union child array #%d does not match type field %s vs %s",
				i, fieldData.DataType(), f.Type)
		}
	}
	return nil
}
+
// ValidateFull runs Validate and additionally checks every type code is a
// valid child id; for dense unions it also checks each offset is in bounds
// for its child and non-decreasing per type code.
func (a *union) ValidateFull() error {
	if err := a.Validate(); err != nil {
		return err
	}

	childIDs := a.unionType.ChildIDs()
	codesMap := a.unionType.TypeCodes()
	codes := a.RawTypeCodes()

	for i := 0; i < a.data.length; i++ {
		code := codes[i]
		if code < 0 || childIDs[code] == arrow.InvalidUnionChildID {
			return fmt.Errorf("arrow/array: union value at position %d has invalid type id %d", i, code)
		}
	}

	if a.unionType.Mode() == arrow.DenseMode {
		// validate offsets

		// map logical typeid to child length
		// (256 covers the full range of int8 type codes)
		var childLengths [256]int64
		for i := range a.unionType.Fields() {
			childLengths[codesMap[i]] = int64(a.data.childData[i].Len())
		}

		// check offsets are in bounds
		var lastOffsets [256]int64
		offsets := arrow.Int32Traits.CastFromBytes(a.data.buffers[2].Bytes())[a.data.offset:]
		for i := int64(0); i < int64(a.data.length); i++ {
			code := codes[i]
			offset := offsets[i]
			switch {
			case offset < 0:
				return fmt.Errorf("arrow/array: union value at position %d has negative offset %d", i, offset)
			case offset >= int32(childLengths[code]):
				return fmt.Errorf("arrow/array: union value at position %d has offset larger than child length (%d >= %d)",
					i, offset, childLengths[code])
			case offset < int32(lastOffsets[code]):
				// offsets for a given type code must be monotonically non-decreasing
				return fmt.Errorf("arrow/array: union value at position %d has non-monotonic offset %d", i, offset)
			}
			lastOffsets[code] = int64(offset)
		}
	}

	return nil
}
+
+// SparseUnion represents an array where each logical value is taken from
+// a single child. A buffer of 8-bit type ids indicates which child a given
+// logical value is to be taken from. This is represented as the ChildID,
+// which is the index into the list of children.
+//
+// In a sparse union, each child array will have the same length as the
+// union array itself, regardless of how many values in the union actually
+// refer to it.
+//
+// Unlike most other arrays, unions do not have a top-level validity bitmap.
+type SparseUnion struct {
+ union
+}
+
+// NewSparseUnion constructs a union array using the given type, length, list of
+// children and buffer of typeIDs with the given offset.
+func NewSparseUnion(dt *arrow.SparseUnionType, length int, children []arrow.Array, typeIDs *memory.Buffer, offset int) *SparseUnion {
+ childData := make([]arrow.ArrayData, len(children))
+ for i, c := range children {
+ childData[i] = c.Data()
+ }
+ data := NewData(dt, length, []*memory.Buffer{nil, typeIDs}, childData, 0, offset)
+ defer data.Release()
+ return NewSparseUnionData(data)
+}
+
+// NewSparseUnionData constructs a SparseUnion array from the given ArrayData object.
+func NewSparseUnionData(data arrow.ArrayData) *SparseUnion {
+ a := &SparseUnion{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
+// NewSparseUnionFromArrays constructs a new SparseUnion array with the provided
+// values.
+//
+// typeIDs *must* be an INT8 array with no nulls
+// len(codes) *must* be either 0 or equal to len(children). If len(codes) is 0,
+// the type codes used will be sequentially numeric starting at 0.
+func NewSparseUnionFromArrays(typeIDs arrow.Array, children []arrow.Array, codes ...arrow.UnionTypeCode) (*SparseUnion, error) {
+ return NewSparseUnionFromArraysWithFieldCodes(typeIDs, children, []string{}, codes)
+}
+
+// NewSparseUnionFromArrayWithFields constructs a new SparseUnion array like
+// NewSparseUnionFromArrays, but allows specifying the field names. Type codes
+// will be auto-generated sequentially starting at 0.
+//
+// typeIDs *must* be an INT8 array with no nulls.
+// len(fields) *must* either be 0 or equal to len(children). If len(fields) is 0,
+// then the fields will be named sequentially starting at "0".
+func NewSparseUnionFromArraysWithFields(typeIDs arrow.Array, children []arrow.Array, fields []string) (*SparseUnion, error) {
+ return NewSparseUnionFromArraysWithFieldCodes(typeIDs, children, fields, []arrow.UnionTypeCode{})
+}
+
// NewSparseUnionFromArraysWithFieldCodes combines the other constructors
// for constructing a new SparseUnion array with the provided field names
// and type codes, along with children and type ids.
//
// All the requirements mentioned in NewSparseUnionFromArrays and
// NewSparseUnionFromArraysWithFields apply.
func NewSparseUnionFromArraysWithFieldCodes(typeIDs arrow.Array, children []arrow.Array, fields []string, codes []arrow.UnionTypeCode) (*SparseUnion, error) {
	// validate the inputs before touching any buffers.
	switch {
	case typeIDs.DataType().ID() != arrow.INT8:
		return nil, errors.New("arrow/array: union array type ids must be signed int8")
	case typeIDs.NullN() != 0:
		return nil, errors.New("arrow/array: union type ids may not have nulls")
	case len(fields) > 0 && len(fields) != len(children):
		return nil, errors.New("arrow/array: field names must have the same length as children")
	case len(codes) > 0 && len(codes) != len(children):
		return nil, errors.New("arrow/array: type codes must have same length as children")
	}

	// buffer 0 (validity) is nil: unions carry no top-level null bitmap.
	buffers := []*memory.Buffer{nil, typeIDs.Data().Buffers()[1]}
	ty := arrow.SparseUnionFromArrays(children, fields, codes)

	childData := make([]arrow.ArrayData, len(children))
	for i, c := range children {
		childData[i] = c.Data()
		// sparse unions require every child to match the type-id length.
		if c.Len() != typeIDs.Len() {
			return nil, errors.New("arrow/array: sparse union array must have len(child) == len(typeids) for all children")
		}
	}

	data := NewData(ty, typeIDs.Len(), buffers, childData, 0, typeIDs.Data().Offset())
	defer data.Release()
	return NewSparseUnionData(data), nil
}
+
// setData initializes the array from data, asserting (in debug builds) the
// invariants of the sparse-union layout: type is SPARSE_UNION, exactly two
// buffers, and a nil validity bitmap.
func (a *SparseUnion) setData(data *Data) {
	a.union.setData(data)
	debug.Assert(a.data.dtype.ID() == arrow.SPARSE_UNION, "arrow/array: invalid data type for SparseUnion")
	debug.Assert(len(a.data.buffers) == 2, "arrow/array: sparse unions should have exactly 2 buffers")
	debug.Assert(a.data.buffers[0] == nil, "arrow/array: validity bitmap for sparse unions should be nil")
}
+
// GetOneForMarshal returns the JSON-marshalable representation of element i:
// nil if the referenced child slot is null, otherwise a two element
// [typeID, value] pair.
func (a *SparseUnion) GetOneForMarshal(i int) interface{} {
	typeID := a.RawTypeCodes()[i]

	childID := a.ChildID(i)
	data := a.Field(childID)

	// sparse unions index into the child at the same position i
	if data.IsNull(i) {
		return nil
	}

	return []interface{}{typeID, data.GetOneForMarshal(i)}
}
+
+func (a *SparseUnion) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+
+ buf.WriteByte('[')
+ for i := 0; i < a.Len(); i++ {
+ if i != 0 {
+ buf.WriteByte(',')
+ }
+ if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
+ return nil, err
+ }
+ }
+ buf.WriteByte(']')
+ return buf.Bytes(), nil
+}
+
+func (a *SparseUnion) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ val := a.GetOneForMarshal(i)
+ if val == nil {
+ // child is nil
+ return NullValueStr
+ }
+
+ data, err := json.Marshal(val)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
+func (a *SparseUnion) String() string {
+ var b strings.Builder
+ b.WriteByte('[')
+
+ fieldList := a.unionType.Fields()
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ b.WriteString(" ")
+ }
+
+ field := fieldList[a.ChildID(i)]
+ f := a.Field(a.ChildID(i))
+ fmt.Fprintf(&b, "{%s=%v}", field.Name, f.GetOneForMarshal(i))
+ }
+ b.WriteByte(']')
+ return b.String()
+}
+
// GetFlattenedField returns a child array, adjusting its validity bitmap
// where the union array type codes don't match.
//
// ie: the returned array will have a null in every index that it is
// not referenced by union.
func (a *SparseUnion) GetFlattenedField(mem memory.Allocator, index int) (arrow.Array, error) {
	if index < 0 || index >= a.NumFields() {
		return nil, fmt.Errorf("arrow/array: index out of range: %d", index)
	}

	childData := a.data.childData[index]
	if a.data.offset != 0 || a.data.length != childData.Len() {
		// the union is a slice: take the matching slice of the child
		childData = NewSliceData(childData, int64(a.data.offset), int64(a.data.offset+a.data.length))
		// NewSliceData doesn't break the slice reference for buffers
		// since we're going to replace the null bitmap buffer we need to break the
		// slice reference so that we don't affect a.children's references
		newBufs := make([]*memory.Buffer, len(childData.Buffers()))
		copy(newBufs, childData.(*Data).buffers)
		childData.(*Data).buffers = newBufs
	} else {
		// full-length child: copy the Data so buffer replacement below
		// doesn't mutate the union's own child data
		childData = childData.(*Data).Copy()
	}
	defer childData.Release()

	// synthesize a null bitmap based on the union discriminant
	// make sure the bitmap has extra bits corresponding to the child's offset
	flattenedNullBitmap := memory.NewResizableBuffer(mem)
	flattenedNullBitmap.Resize(childData.Len() + childData.Offset())

	var (
		childNullBitmap = childData.Buffers()[0]
		childOffset     = childData.Offset()
		typeCode        = a.unionType.TypeCodes()[index]
		codes           = a.RawTypeCodes()
		offset          int64 = 0
	)
	// a slot is valid (so far) iff the union's type code selects this child
	bitutils.GenerateBitsUnrolled(flattenedNullBitmap.Bytes(), int64(childOffset), int64(a.data.length),
		func() bool {
			b := codes[offset] == typeCode
			offset++
			return b
		})

	if childNullBitmap != nil {
		// AND with the child's own validity so pre-existing nulls survive
		defer childNullBitmap.Release()
		bitutil.BitmapAnd(flattenedNullBitmap.Bytes(), childNullBitmap.Bytes(),
			int64(childOffset), int64(childOffset), flattenedNullBitmap.Bytes(),
			int64(childOffset), int64(childData.Len()))
	}
	childData.(*Data).buffers[0] = flattenedNullBitmap
	// recompute the null count from the synthesized bitmap
	childData.(*Data).nulls = childData.Len() - bitutil.CountSetBits(flattenedNullBitmap.Bytes(), childOffset, childData.Len())
	return MakeFromData(childData), nil
}
+
+func arraySparseUnionEqual(l, r *SparseUnion) bool {
+ childIDs := l.unionType.ChildIDs()
+ leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes()
+
+ for i := 0; i < l.data.length; i++ {
+ typeID := leftCodes[i]
+ if typeID != rightCodes[i] {
+ return false
+ }
+
+ childNum := childIDs[typeID]
+ eq := SliceEqual(l.children[childNum], int64(i), int64(i+1),
+ r.children[childNum], int64(i), int64(i+1))
+ if !eq {
+ return false
+ }
+ }
+ return true
+}
+
// arraySparseUnionApproxEqual reports whether two sparse unions of equal
// length are element-wise approximately equal (per opt, e.g. float
// tolerances). Child indices are adjusted by each array's data offset.
func arraySparseUnionApproxEqual(l, r *SparseUnion, opt equalOption) bool {
	childIDs := l.unionType.ChildIDs()
	leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes()

	for i := 0; i < l.data.length; i++ {
		typeID := leftCodes[i]
		if typeID != rightCodes[i] {
			return false
		}

		childNum := childIDs[typeID]
		eq := sliceApproxEqual(l.children[childNum], int64(i+l.data.offset), int64(i+l.data.offset+1),
			r.children[childNum], int64(i+r.data.offset), int64(i+r.data.offset+1), opt)
		if !eq {
			return false
		}
	}
	return true
}
+
// DenseUnion represents an array where each logical value is taken from
// a single child, at a specific offset. A buffer of 8-bit type ids
// indicates which child a given logical value is to be taken from and
// a buffer of 32-bit offsets indicating which physical position in the
// given child array has the logical value for that index.
//
// Unlike a sparse union, a dense union allows encoding only the child values
// which are actually referred to by the union array. This is counterbalanced
// by the additional footprint of the offsets buffer, and the additional
// indirection cost when looking up values.
//
// Unlike most other arrays, unions do not have a top-level validity bitmap.
type DenseUnion struct {
	union
	// offsets is the int32 value-offset buffer viewed as a slice
	// (set lazily in setData when length > 0).
	offsets []int32
}
+
+// NewDenseUnion constructs a union array using the given type, length, list of
+// children and buffers of typeIDs and offsets, with the given array offset.
+func NewDenseUnion(dt *arrow.DenseUnionType, length int, children []arrow.Array, typeIDs, valueOffsets *memory.Buffer, offset int) *DenseUnion {
+ childData := make([]arrow.ArrayData, len(children))
+ for i, c := range children {
+ childData[i] = c.Data()
+ }
+
+ data := NewData(dt, length, []*memory.Buffer{nil, typeIDs, valueOffsets}, childData, 0, offset)
+ defer data.Release()
+ return NewDenseUnionData(data)
+}
+
+// NewDenseUnionData constructs a DenseUnion array from the given ArrayData object.
+func NewDenseUnionData(data arrow.ArrayData) *DenseUnion {
+ a := &DenseUnion{}
+ a.refCount = 1
+ a.setData(data.(*Data))
+ return a
+}
+
// NewDenseUnionFromArrays constructs a new DenseUnion array with the provided
// values.
//
// typeIDs *must* be an INT8 array with no nulls
// offsets *must* be an INT32 array with no nulls
// len(codes) *must* be either 0 or equal to len(children). If len(codes) is 0,
// the type codes used will be sequentially numeric starting at 0.
func NewDenseUnionFromArrays(typeIDs, offsets arrow.Array, children []arrow.Array, codes ...arrow.UnionTypeCode) (*DenseUnion, error) {
	// delegate to the combined constructor with auto-generated field names
	return NewDenseUnionFromArraysWithFieldCodes(typeIDs, offsets, children, []string{}, codes)
}
+
// NewDenseUnionFromArraysWithFields constructs a new DenseUnion array like
// NewDenseUnionFromArrays, but allows specifying the field names. Type codes
// will be auto-generated sequentially starting at 0.
//
// typeIDs *must* be an INT8 array with no nulls.
// offsets *must* be an INT32 array with no nulls.
// len(fields) *must* either be 0 or equal to len(children). If len(fields) is 0,
// then the fields will be named sequentially starting at "0".
func NewDenseUnionFromArraysWithFields(typeIDs, offsets arrow.Array, children []arrow.Array, fields []string) (*DenseUnion, error) {
	// delegate to the combined constructor with auto-generated type codes
	return NewDenseUnionFromArraysWithFieldCodes(typeIDs, offsets, children, fields, []arrow.UnionTypeCode{})
}
+
// NewDenseUnionFromArraysWithFieldCodes combines the other constructors
// for constructing a new DenseUnion array with the provided field names
// and type codes, along with children and type ids.
//
// All the requirements mentioned in NewDenseUnionFromArrays and
// NewDenseUnionFromArraysWithFields apply.
func NewDenseUnionFromArraysWithFieldCodes(typeIDs, offsets arrow.Array, children []arrow.Array, fields []string, codes []arrow.UnionTypeCode) (*DenseUnion, error) {
	// validate inputs up front before touching any buffers
	switch {
	case offsets.DataType().ID() != arrow.INT32:
		return nil, errors.New("arrow/array: union offsets must be signed int32")
	case typeIDs.DataType().ID() != arrow.INT8:
		return nil, errors.New("arrow/array: union type_ids must be signed int8")
	case typeIDs.NullN() != 0:
		return nil, errors.New("arrow/array: union typeIDs may not have nulls")
	case offsets.NullN() != 0:
		return nil, errors.New("arrow/array: nulls are not allowed in offsets for NewDenseUnionFromArrays*")
	case len(fields) > 0 && len(fields) != len(children):
		return nil, errors.New("arrow/array: fields must be the same length as children")
	case len(codes) > 0 && len(codes) != len(children):
		return nil, errors.New("arrow/array: typecodes must have the same length as children")
	}

	ty := arrow.DenseUnionFromArrays(children, fields, codes)
	// no validity bitmap (nil), raw type-id data, raw int32 offset data
	buffers := []*memory.Buffer{nil, typeIDs.Data().Buffers()[1], offsets.Data().Buffers()[1]}

	childData := make([]arrow.ArrayData, len(children))
	for i, c := range children {
		childData[i] = c.Data()
	}

	// NewData retains its inputs; drop our temporary reference after
	// the DenseUnion takes its own.
	data := NewData(ty, typeIDs.Len(), buffers, childData, 0, typeIDs.Data().Offset())
	defer data.Release()
	return NewDenseUnionData(data), nil
}
+
// ValueOffsets returns the buffer holding the 32-bit value offsets.
func (a *DenseUnion) ValueOffsets() *memory.Buffer { return a.data.buffers[2] }
+
// ValueOffset returns the offset into the selected child for element i.
func (a *DenseUnion) ValueOffset(i int) int32 { return a.offsets[i+a.data.offset] }
+
// RawValueOffsets returns the value offsets adjusted for this array's offset.
func (a *DenseUnion) RawValueOffsets() []int32 { return a.offsets[a.data.offset:] }
+
// setData initializes the array from data, asserting (in debug builds) the
// dense-union layout invariants (DENSE_UNION type, exactly 3 buffers, nil
// validity bitmap), and caches the int32 view of the offsets buffer.
func (a *DenseUnion) setData(data *Data) {
	a.union.setData(data)
	debug.Assert(a.data.dtype.ID() == arrow.DENSE_UNION, "arrow/array: invalid data type for DenseUnion")
	debug.Assert(len(a.data.buffers) == 3, "arrow/array: dense unions should have exactly 3 buffers")
	debug.Assert(a.data.buffers[0] == nil, "arrow/array: validity bitmap for dense unions should be nil")

	// an empty array may have a nil offsets buffer; guard before casting
	if data.length > 0 {
		a.offsets = arrow.Int32Traits.CastFromBytes(a.data.buffers[2].Bytes())
	} else {
		a.offsets = []int32{}
	}
}
+
// GetOneForMarshal returns the JSON-marshalable representation of element i:
// nil if the referenced child slot is null, otherwise a two element
// [typeID, value] pair.
func (a *DenseUnion) GetOneForMarshal(i int) interface{} {
	typeID := a.RawTypeCodes()[i]

	childID := a.ChildID(i)
	data := a.Field(childID)

	// dense unions indirect through the value offsets buffer
	offset := int(a.RawValueOffsets()[i])
	if data.IsNull(offset) {
		return nil
	}

	return []interface{}{typeID, data.GetOneForMarshal(offset)}
}
+
+func (a *DenseUnion) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ enc := json.NewEncoder(&buf)
+
+ buf.WriteByte('[')
+ for i := 0; i < a.Len(); i++ {
+ if i != 0 {
+ buf.WriteByte(',')
+ }
+ if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
+ return nil, err
+ }
+ }
+ buf.WriteByte(']')
+ return buf.Bytes(), nil
+}
+
+func (a *DenseUnion) ValueStr(i int) string {
+ if a.IsNull(i) {
+ return NullValueStr
+ }
+
+ val := a.GetOneForMarshal(i)
+ if val == nil {
+ // child in nil
+ return NullValueStr
+ }
+
+ data, err := json.Marshal(val)
+ if err != nil {
+ panic(err)
+ }
+ return string(data)
+}
+
+func (a *DenseUnion) String() string {
+ var b strings.Builder
+ b.WriteByte('[')
+
+ offsets := a.RawValueOffsets()
+
+ fieldList := a.unionType.Fields()
+ for i := 0; i < a.Len(); i++ {
+ if i > 0 {
+ b.WriteString(" ")
+ }
+
+ field := fieldList[a.ChildID(i)]
+ f := a.Field(a.ChildID(i))
+ fmt.Fprintf(&b, "{%s=%v}", field.Name, f.GetOneForMarshal(int(offsets[i])))
+ }
+ b.WriteByte(']')
+ return b.String()
+}
+
// arrayDenseUnionEqual reports whether two dense unions of equal length are
// element-wise equal. The value offsets are already absolute positions in
// the child arrays, so no additional offset adjustment is needed.
func arrayDenseUnionEqual(l, r *DenseUnion) bool {
	childIDs := l.unionType.ChildIDs()
	leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes()
	leftOffsets, rightOffsets := l.RawValueOffsets(), r.RawValueOffsets()

	for i := 0; i < l.data.length; i++ {
		typeID := leftCodes[i]
		if typeID != rightCodes[i] {
			return false
		}

		childNum := childIDs[typeID]
		eq := SliceEqual(l.children[childNum], int64(leftOffsets[i]), int64(leftOffsets[i]+1),
			r.children[childNum], int64(rightOffsets[i]), int64(rightOffsets[i]+1))
		if !eq {
			return false
		}
	}
	return true
}
+
// arrayDenseUnionApproxEqual reports whether two dense unions of equal
// length are element-wise approximately equal (per opt, e.g. float
// tolerances). Mirrors arrayDenseUnionEqual.
func arrayDenseUnionApproxEqual(l, r *DenseUnion, opt equalOption) bool {
	childIDs := l.unionType.ChildIDs()
	leftCodes, rightCodes := l.RawTypeCodes(), r.RawTypeCodes()
	leftOffsets, rightOffsets := l.RawValueOffsets(), r.RawValueOffsets()

	for i := 0; i < l.data.length; i++ {
		typeID := leftCodes[i]
		if typeID != rightCodes[i] {
			return false
		}

		childNum := childIDs[typeID]
		eq := sliceApproxEqual(l.children[childNum], int64(leftOffsets[i]), int64(leftOffsets[i]+1),
			r.children[childNum], int64(rightOffsets[i]), int64(rightOffsets[i]+1), opt)
		if !eq {
			return false
		}
	}
	return true
}
+
// UnionBuilder is a convenience interface for building Union arrays of
// either Dense or Sparse mode.
type UnionBuilder interface {
	Builder
	// AppendChild allows constructing the union type on the fly by making a
	// new array builder available to the union builder. The type code (index)
	// of the new child is returned, which should be passed to the Append method
	// when adding a new element to the union array.
	AppendChild(newChild Builder, fieldName string) (newCode arrow.UnionTypeCode)
	// Append adds an element to the UnionArray indicating which typecode the
	// new element should use. This *must* be followed up by an append to the
	// appropriate child builder.
	Append(arrow.UnionTypeCode)
	// Mode returns what kind of Union is being built, either arrow.SparseMode
	// or arrow.DenseMode
	Mode() arrow.UnionMode
	// Child returns the builder for the requested child index.
	// If an invalid index is requested (e.g. < 0 or >= len(children))
	// then this will panic.
	Child(idx int) Builder
}
+
// unionBuilder holds the state shared by SparseUnionBuilder and
// DenseUnionBuilder: the child builders, the type-code bookkeeping and the
// buffer of appended type ids.
type unionBuilder struct {
	builder

	childFields []arrow.Field
	codes       []arrow.UnionTypeCode
	mode        arrow.UnionMode

	children        []Builder
	typeIDtoBuilder []Builder
	typeIDtoChildID []int
	// for all typeID < denseTypeID, typeIDtoBuilder[typeID] != nil
	denseTypeID  arrow.UnionTypeCode
	typesBuilder *int8BufferBuilder
}
+
// newUnionBuilder constructs the shared union builder state for the given
// mode/type, retaining each provided child builder and wiring the
// typeID -> child and typeID -> builder lookup tables.
func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionType) unionBuilder {
	if children == nil {
		children = make([]Builder, 0)
	}
	b := unionBuilder{
		builder:         builder{refCount: 1, mem: mem},
		mode:            typ.Mode(),
		codes:           typ.TypeCodes(),
		children:        children,
		typeIDtoChildID: make([]int, int(typ.MaxTypeCode())+1),    // convert to int as int8(127) +1 panics
		typeIDtoBuilder: make([]Builder, int(typ.MaxTypeCode())+1), // convert to int as int8(127) +1 panics
		childFields:     make([]arrow.Field, len(children)),
		typesBuilder:    newInt8BufferBuilder(mem),
	}

	// fill typeIDtoChildID with InvalidUnionChildID using the standard
	// doubling-copy trick (log(n) copies instead of n assignments)
	b.typeIDtoChildID[0] = arrow.InvalidUnionChildID
	for i := 1; i < len(b.typeIDtoChildID); i *= 2 {
		copy(b.typeIDtoChildID[i:], b.typeIDtoChildID[:i])
	}

	debug.Assert(len(children) == len(typ.TypeCodes()), "mismatched typecodes and children")
	debug.Assert(len(b.typeIDtoBuilder)-1 <= int(arrow.MaxUnionTypeCode), "too many typeids")

	copy(b.childFields, typ.Fields())
	for i, c := range children {
		c.Retain()
		typeID := typ.TypeCodes()[i]
		b.typeIDtoChildID[typeID] = i
		b.typeIDtoBuilder[typeID] = c
	}

	return b
}
+
// NumChildren returns the number of child builders currently in the union.
func (b *unionBuilder) NumChildren() int {
	return len(b.children)
}
+
+func (b *unionBuilder) Child(idx int) Builder {
+ if idx < 0 || idx > len(b.children) {
+ panic("arrow/array: invalid child index for union builder")
+ }
+ return b.children[idx]
+}
+
// Len returns the current number of elements in the builder
// (one type id per appended element).
func (b *unionBuilder) Len() int { return b.typesBuilder.Len() }
+
// Mode reports whether a Sparse or Dense union is being built.
func (b *unionBuilder) Mode() arrow.UnionMode { return b.mode }
+
// reserve grows the builder's capacity (via the provided resize callback) to
// hold elements additional values, rounding up to the next power of two.
func (b *unionBuilder) reserve(elements int, resize func(int)) {
	// union has no null bitmap, ever, so we can skip that handling
	if b.length+elements > b.capacity {
		b.capacity = bitutil.NextPowerOf2(b.length + elements)
		resize(b.capacity)
	}
}
+
// Release decrements the reference count; when it reaches zero the child
// builders and the type-id buffer are released.
func (b *unionBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		for _, c := range b.children {
			c.Release()
		}
		b.typesBuilder.Release()
	}
}
+
+func (b *unionBuilder) Type() arrow.DataType {
+ fields := make([]arrow.Field, len(b.childFields))
+ for i, f := range b.childFields {
+ fields[i] = f
+ fields[i].Type = b.children[i].Type()
+ }
+
+ switch b.mode {
+ case arrow.SparseMode:
+ return arrow.SparseUnionOf(fields, b.codes)
+ case arrow.DenseMode:
+ return arrow.DenseUnionOf(fields, b.codes)
+ default:
+ panic("invalid union builder mode")
+ }
+}
+
// AppendChild registers a new child builder (retaining it) under the next
// free type code and returns that code for use with Append.
func (b *unionBuilder) AppendChild(newChild Builder, fieldName string) arrow.UnionTypeCode {
	newChild.Retain()
	b.children = append(b.children, newChild)
	newType := b.nextTypeID()

	b.typeIDtoChildID[newType] = len(b.children) - 1
	b.typeIDtoBuilder[newType] = newChild
	b.childFields = append(b.childFields, arrow.Field{Name: fieldName, Nullable: true})
	b.codes = append(b.codes, newType)

	return newType
}
+
// nextTypeID finds and claims the lowest unused type code, growing the
// lookup tables if all existing codes are taken.
func (b *unionBuilder) nextTypeID() arrow.UnionTypeCode {
	// find typeID such that typeIDtoBuilder[typeID] == nil
	// use that for the new child. Start searching at denseTypeID
	// since typeIDtoBuilder is densely packed up at least to denseTypeID
	for ; int(b.denseTypeID) < len(b.typeIDtoBuilder); b.denseTypeID++ {
		if b.typeIDtoBuilder[b.denseTypeID] == nil {
			id := b.denseTypeID
			b.denseTypeID++
			return id
		}
	}

	debug.Assert(len(b.typeIDtoBuilder) < int(arrow.MaxUnionTypeCode), "too many children typeids")
	// typeIDtoBuilder is already densely packed, so just append the new child
	b.typeIDtoBuilder = append(b.typeIDtoBuilder, nil)
	b.typeIDtoChildID = append(b.typeIDtoChildID, arrow.InvalidUnionChildID)
	id := b.denseTypeID
	b.denseTypeID++
	return id

}
+
// newData finishes the type-id buffer and the child builders into a new
// *Data for the union (no validity bitmap). The deferred Releases drop this
// function's temporary references once NewData has retained its own.
func (b *unionBuilder) newData() *Data {
	length := b.typesBuilder.Len()
	typesBuffer := b.typesBuilder.Finish()
	defer typesBuffer.Release()
	childData := make([]arrow.ArrayData, len(b.children))
	for i, b := range b.children {
		childData[i] = b.newData()
		defer childData[i].Release()
	}

	return NewData(b.Type(), length, []*memory.Buffer{nil, typesBuffer}, childData, 0, 0)
}
+
// SparseUnionBuilder is used to build a Sparse Union array using the Append
// methods. You can also add new types to the union on the fly by using
// AppendChild.
//
// Keep in mind: All children of a SparseUnion should be the same length
// as the union itself. If you add new children with AppendChild, ensure
// that they have the correct number of preceding elements that have been
// added to the builder beforehand.
type SparseUnionBuilder struct {
	unionBuilder
}
+
// NewEmptySparseUnionBuilder is a helper to construct a SparseUnionBuilder
// without having to predefine the union types. It creates a builder with no
// children and AppendChild will have to be called before appending any
// elements to this builder.
func NewEmptySparseUnionBuilder(mem memory.Allocator) *SparseUnionBuilder {
	return &SparseUnionBuilder{
		unionBuilder: newUnionBuilder(mem, nil, arrow.SparseUnionOf([]arrow.Field{}, []arrow.UnionTypeCode{})),
	}
}
+
// NewSparseUnionBuilder constructs a new SparseUnionBuilder with the provided
// children and type codes. Builders will be constructed for each child
// using the fields in typ
func NewSparseUnionBuilder(mem memory.Allocator, typ *arrow.SparseUnionType) *SparseUnionBuilder {
	children := make([]Builder, len(typ.Fields()))
	for i, f := range typ.Fields() {
		children[i] = NewBuilder(mem, f.Type)
		// deferred: drop our references after the union builder retains
		// its own in NewSparseUnionBuilderWithBuilders
		defer children[i].Release()
	}
	return NewSparseUnionBuilderWithBuilders(mem, typ, children)
}
+
// NewSparseUnionBuilderWithBuilders returns a new SparseUnionBuilder using
// the provided type and builders.
func NewSparseUnionBuilderWithBuilders(mem memory.Allocator, typ *arrow.SparseUnionType, children []Builder) *SparseUnionBuilder {
	return &SparseUnionBuilder{
		unionBuilder: newUnionBuilder(mem, children, typ),
	}
}
+
// Reserve ensures capacity for at least n more elements.
func (b *SparseUnionBuilder) Reserve(n int) {
	b.reserve(n, b.Resize)
}
+
// Resize adjusts the capacity of the type-id buffer to n elements.
func (b *SparseUnionBuilder) Resize(n int) {
	b.typesBuilder.resize(n)
}
+
// AppendNull will append a null to the first child and an empty value
// (implementation-defined) to the rest of the children.
func (b *SparseUnionBuilder) AppendNull() {
	firstChildCode := b.codes[0]
	b.typesBuilder.AppendValue(firstChildCode)
	b.typeIDtoBuilder[firstChildCode].AppendNull()
	// keep all children the same length as the union itself
	for _, c := range b.codes[1:] {
		b.typeIDtoBuilder[c].AppendEmptyValue()
	}
}
+
// AppendNulls is identical to calling AppendNull() n times, except
// it will pre-allocate with reserve for all the nulls beforehand.
func (b *SparseUnionBuilder) AppendNulls(n int) {
	firstChildCode := b.codes[0]
	b.Reserve(n)
	for _, c := range b.codes {
		b.typeIDtoBuilder[c].Reserve(n)
	}
	for i := 0; i < n; i++ {
		b.typesBuilder.AppendValue(firstChildCode)
		b.typeIDtoBuilder[firstChildCode].AppendNull()
		for _, c := range b.codes[1:] {
			b.typeIDtoBuilder[c].AppendEmptyValue()
		}
	}
}
+
// AppendEmptyValue appends an empty value (implementation defined)
// to each child, and appends the type of the first typecode to the typeid
// buffer.
func (b *SparseUnionBuilder) AppendEmptyValue() {
	b.typesBuilder.AppendValue(b.codes[0])
	for _, c := range b.codes {
		b.typeIDtoBuilder[c].AppendEmptyValue()
	}
}
+
// AppendEmptyValues is identical to calling AppendEmptyValue() n times,
// except it pre-allocates first so it is more efficient.
func (b *SparseUnionBuilder) AppendEmptyValues(n int) {
	b.Reserve(n)
	firstChildCode := b.codes[0]
	for _, c := range b.codes {
		b.typeIDtoBuilder[c].Reserve(n)
	}
	for i := 0; i < n; i++ {
		b.typesBuilder.AppendValue(firstChildCode)
		for _, c := range b.codes {
			b.typeIDtoBuilder[c].AppendEmptyValue()
		}
	}
}
+
// Append appends an element to the UnionArray and must be followed up
// by an append to the appropriate child builder. The parameter should
// be the type id of the child to which the next value will be appended.
//
// After appending to the corresponding child builder, all other child
// builders should have a null or empty value appended to them (although
// this is not enforced and any value is theoretically allowed and will be
// ignored).
func (b *SparseUnionBuilder) Append(nextType arrow.UnionTypeCode) {
	b.typesBuilder.AppendValue(nextType)
}
+
// NewArray creates the finished SparseUnion array, satisfying the Builder interface.
func (b *SparseUnionBuilder) NewArray() arrow.Array {
	return b.NewSparseUnionArray()
}
+
+func (b *SparseUnionBuilder) NewSparseUnionArray() (a *SparseUnion) {
+ data := b.newData()
+ a = NewSparseUnionData(data)
+ data.Release()
+ return
+}
+
+func (b *SparseUnionBuilder) UnmarshalJSON(data []byte) (err error) {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("sparse union builder must unpack from json array, found %s", t)
+ }
+ return b.Unmarshal(dec)
+}
+
+func (b *SparseUnionBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *SparseUnionBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ b.AppendNull()
+ return nil
+ }
+ dec := json.NewDecoder(strings.NewReader(s))
+ return b.UnmarshalOne(dec)
+}
+
// UnmarshalOne decodes a single union element from dec: either JSON null
// (appended as a null), or a two-element array [typeID, value]. For sparse
// unions, a null is appended to every non-selected child so all children
// stay the same length as the union.
func (b *SparseUnionBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch t {
	case json.Delim('['):
		// should be [type_id, Value]
		typeID, err := dec.Token()
		if err != nil {
			return err
		}

		var typeCode int8

		// the decoder may produce either a json.Number (UseNumber) or a
		// float64 for the type id; reject non-integral floats
		switch tid := typeID.(type) {
		case json.Number:
			id, err := tid.Int64()
			if err != nil {
				return err
			}
			typeCode = int8(id)
		case float64:
			if tid != float64(int64(tid)) {
				return &json.UnmarshalTypeError{
					Offset: dec.InputOffset(),
					Type:   reflect.TypeOf(int8(0)),
					Struct: fmt.Sprint(b.Type()),
					Value:  "float",
				}
			}
			typeCode = int8(tid)
		}

		childNum := b.typeIDtoChildID[typeCode]
		if childNum == arrow.InvalidUnionChildID {
			return &json.UnmarshalTypeError{
				Offset: dec.InputOffset(),
				Value:  "invalid type code",
			}
		}

		// sparse union: pad every non-selected child with a null
		for i, c := range b.children {
			if i != childNum {
				c.AppendNull()
			}
		}

		b.Append(typeCode)
		if err := b.children[childNum].UnmarshalOne(dec); err != nil {
			return err
		}

		// consume and verify the closing ']' of the pair
		endArr, err := dec.Token()
		if err != nil {
			return err
		}

		if endArr != json.Delim(']') {
			return &json.UnmarshalTypeError{
				Offset: dec.InputOffset(),
				Value:  "union value array should have exactly 2 elements",
			}
		}
	case nil:
		b.AppendNull()
	default:
		return &json.UnmarshalTypeError{
			Offset: dec.InputOffset(),
			Value:  fmt.Sprint(t),
			Struct: fmt.Sprint(b.Type()),
		}
	}
	return nil
}
+
// DenseUnionBuilder is used to build a Dense Union array using the Append
// methods. You can also add new types to the union on the fly by using
// AppendChild.
type DenseUnionBuilder struct {
	unionBuilder

	// offsetsBuilder accumulates the int32 value offsets, one per element.
	offsetsBuilder *int32BufferBuilder
}
+
// NewEmptyDenseUnionBuilder is a helper to construct a DenseUnionBuilder
// without having to predefine the union types. It creates a builder with no
// children and AppendChild will have to be called before appending any
// elements to this builder.
func NewEmptyDenseUnionBuilder(mem memory.Allocator) *DenseUnionBuilder {
	return &DenseUnionBuilder{
		unionBuilder:   newUnionBuilder(mem, nil, arrow.DenseUnionOf([]arrow.Field{}, []arrow.UnionTypeCode{})),
		offsetsBuilder: newInt32BufferBuilder(mem),
	}
}
+
// NewDenseUnionBuilder constructs a new DenseUnionBuilder with the provided
// children and type codes. Builders will be constructed for each child
// using the fields in typ
func NewDenseUnionBuilder(mem memory.Allocator, typ *arrow.DenseUnionType) *DenseUnionBuilder {
	children := make([]Builder, 0, len(typ.Fields()))
	// drop our references after the union builder retains its own
	defer func() {
		for _, child := range children {
			child.Release()
		}
	}()

	for _, f := range typ.Fields() {
		children = append(children, NewBuilder(mem, f.Type))
	}
	return NewDenseUnionBuilderWithBuilders(mem, typ, children)
}
+
// NewDenseUnionBuilderWithBuilders returns a new DenseUnionBuilder using
// the provided type and builders.
func NewDenseUnionBuilderWithBuilders(mem memory.Allocator, typ *arrow.DenseUnionType, children []Builder) *DenseUnionBuilder {
	return &DenseUnionBuilder{
		unionBuilder:   newUnionBuilder(mem, children, typ),
		offsetsBuilder: newInt32BufferBuilder(mem),
	}
}
+
// Reserve ensures capacity for at least n more elements.
func (b *DenseUnionBuilder) Reserve(n int) {
	b.reserve(n, b.Resize)
}
+
// Resize adjusts the capacity of both the type-id and offsets buffers
// to n elements.
func (b *DenseUnionBuilder) Resize(n int) {
	b.typesBuilder.resize(n)
	b.offsetsBuilder.resize(n * arrow.Int32SizeBytes)
}
+
// AppendNull will only append a null value arbitrarily to the first child
// and use that offset for this element of the array.
func (b *DenseUnionBuilder) AppendNull() {
	firstChildCode := b.codes[0]
	childBuilder := b.typeIDtoBuilder[firstChildCode]
	b.typesBuilder.AppendValue(firstChildCode)
	// record the offset before appending, so it points at the new null
	b.offsetsBuilder.AppendValue(int32(childBuilder.Len()))
	childBuilder.AppendNull()
}
+
// AppendNulls will only append a single null arbitrarily to the first child
// and use the same offset multiple times to point to it. The result is that
// for a DenseUnion this is more efficient than calling AppendNull multiple
// times in a loop
func (b *DenseUnionBuilder) AppendNulls(n int) {
	// only append 1 null to the child builder, use the same offset n times
	firstChildCode := b.codes[0]
	childBuilder := b.typeIDtoBuilder[firstChildCode]
	b.Reserve(n)
	for i := 0; i < n; i++ {
		b.typesBuilder.AppendValue(firstChildCode)
		b.offsetsBuilder.AppendValue(int32(childBuilder.Len()))
	}
	// only append a single null to the child builder, the offsets all refer to the same value
	childBuilder.AppendNull()
}
+
// AppendEmptyValue only appends an empty value arbitrarily to the first child,
// and then uses that offset to identify the value.
func (b *DenseUnionBuilder) AppendEmptyValue() {
	firstChildCode := b.codes[0]
	childBuilder := b.typeIDtoBuilder[firstChildCode]
	b.typesBuilder.AppendValue(firstChildCode)
	// record the offset before appending, so it points at the new value
	b.offsetsBuilder.AppendValue(int32(childBuilder.Len()))
	childBuilder.AppendEmptyValue()
}
+
// AppendEmptyValues, like AppendNulls, will only append a single empty value
// (implementation defined) to the first child arbitrarily, and then point
// at that value using the offsets n times. That makes this more efficient
// than calling AppendEmptyValue multiple times.
func (b *DenseUnionBuilder) AppendEmptyValues(n int) {
	// only append 1 empty value to the child builder, use the same offset n times
	firstChildCode := b.codes[0]
	childBuilder := b.typeIDtoBuilder[firstChildCode]
	b.Reserve(n)
	for i := 0; i < n; i++ {
		b.typesBuilder.AppendValue(firstChildCode)
		b.offsetsBuilder.AppendValue(int32(childBuilder.Len()))
	}
	// only append a single empty value to the child builder, the offsets all
	// refer to the same value
	childBuilder.AppendEmptyValue()
}
+
// Append appends the necessary offset and type code to the builder
// and must be followed up with an append to the appropriate child builder
func (b *DenseUnionBuilder) Append(nextType arrow.UnionTypeCode) {
	b.typesBuilder.AppendValue(nextType)
	bldr := b.typeIDtoBuilder[nextType]
	// offsets are int32, so a single child may hold at most 2^31 - 1 values
	if bldr.Len() == kMaxElems {
		panic("a dense UnionArray cannot contain more than 2^31 - 1 elements from a single child")
	}

	b.offsetsBuilder.AppendValue(int32(bldr.Len()))
}
+
// Release decrements the reference count; when it reaches zero the child
// builders and both internal buffers (type ids and offsets) are released.
func (b *DenseUnionBuilder) Release() {
	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

	if atomic.AddInt64(&b.refCount, -1) == 0 {
		for _, c := range b.children {
			c.Release()
		}
		b.typesBuilder.Release()
		b.offsetsBuilder.Release()
	}
}
+
// newData extends the shared union data with the finished offsets buffer
// as the third buffer of the dense-union layout.
func (b *DenseUnionBuilder) newData() *Data {
	data := b.unionBuilder.newData()
	data.buffers = append(data.buffers, b.offsetsBuilder.Finish())
	return data
}
+
// NewArray creates the finished DenseUnion array, satisfying the Builder interface.
func (b *DenseUnionBuilder) NewArray() arrow.Array {
	return b.NewDenseUnionArray()
}
+
+func (b *DenseUnionBuilder) NewDenseUnionArray() (a *DenseUnion) {
+ data := b.newData()
+ a = NewDenseUnionData(data)
+ data.Release()
+ return
+}
+
+func (b *DenseUnionBuilder) UnmarshalJSON(data []byte) (err error) {
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return err
+ }
+
+ if delim, ok := t.(json.Delim); !ok || delim != '[' {
+ return fmt.Errorf("dense union builder must unpack from json array, found %s", t)
+ }
+ return b.Unmarshal(dec)
+}
+
+func (b *DenseUnionBuilder) Unmarshal(dec *json.Decoder) error {
+ for dec.More() {
+ if err := b.UnmarshalOne(dec); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *DenseUnionBuilder) AppendValueFromString(s string) error {
+ if s == NullValueStr {
+ d.AppendNull()
+ return nil
+ }
+ dec := json.NewDecoder(strings.NewReader(s))
+ return d.UnmarshalOne(dec)
+}
+
// UnmarshalOne decodes a single union element from dec: either JSON null
// (appended as a null), or a two-element array [typeID, value]. Unlike the
// sparse variant, only the selected child receives a value; the offsets
// buffer records its position.
func (b *DenseUnionBuilder) UnmarshalOne(dec *json.Decoder) error {
	t, err := dec.Token()
	if err != nil {
		return err
	}

	switch t {
	case json.Delim('['):
		// should be [type_id, Value]
		typeID, err := dec.Token()
		if err != nil {
			return err
		}

		var typeCode int8

		// the decoder may produce either a json.Number (UseNumber) or a
		// float64 for the type id; reject non-integral floats
		switch tid := typeID.(type) {
		case json.Number:
			id, err := tid.Int64()
			if err != nil {
				return err
			}
			typeCode = int8(id)
		case float64:
			if tid != float64(int64(tid)) {
				return &json.UnmarshalTypeError{
					Offset: dec.InputOffset(),
					Type:   reflect.TypeOf(int8(0)),
					Struct: fmt.Sprint(b.Type()),
					Value:  "float",
				}
			}
			typeCode = int8(tid)
		}

		childNum := b.typeIDtoChildID[typeCode]
		if childNum == arrow.InvalidUnionChildID {
			return &json.UnmarshalTypeError{
				Offset: dec.InputOffset(),
				Value:  "invalid type code",
			}
		}

		// Append records the type code and the offset into the child
		b.Append(typeCode)
		if err := b.children[childNum].UnmarshalOne(dec); err != nil {
			return err
		}

		// consume and verify the closing ']' of the pair
		endArr, err := dec.Token()
		if err != nil {
			return err
		}

		if endArr != json.Delim(']') {
			return &json.UnmarshalTypeError{
				Offset: dec.InputOffset(),
				Value:  "union value array should have exactly 2 elements",
			}
		}
	case nil:
		b.AppendNull()
	default:
		return &json.UnmarshalTypeError{
			Offset: dec.InputOffset(),
			Value:  fmt.Sprint(t),
			Struct: fmt.Sprint(b.Type()),
		}
	}
	return nil
}
+
// compile-time checks that the union arrays and builders satisfy the
// expected interfaces
var (
	_ arrow.Array  = (*SparseUnion)(nil)
	_ arrow.Array  = (*DenseUnion)(nil)
	_ Union        = (*SparseUnion)(nil)
	_ Union        = (*DenseUnion)(nil)
	_ Builder      = (*SparseUnionBuilder)(nil)
	_ Builder      = (*DenseUnionBuilder)(nil)
	_ UnionBuilder = (*SparseUnionBuilder)(nil)
	_ UnionBuilder = (*DenseUnionBuilder)(nil)
)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go
new file mode 100644
index 000000000..54d15a809
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go
@@ -0,0 +1,523 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package array
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v14/internal/hashing"
+ "github.com/apache/arrow/go/v14/internal/json"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+type fromJSONCfg struct {
+ multiDocument bool
+ startOffset int64
+ useNumber bool
+}
+
+type FromJSONOption func(*fromJSONCfg)
+
+func WithMultipleDocs() FromJSONOption {
+ return func(c *fromJSONCfg) {
+ c.multiDocument = true
+ }
+}
+
+// WithStartOffset attempts to start decoding from the reader at the offset
+// passed in. If using this option the reader must fulfill the io.ReadSeeker
+// interface, or else an error will be returned.
+//
+// It will call Seek(off, io.SeekStart) on the reader
+func WithStartOffset(off int64) FromJSONOption {
+ return func(c *fromJSONCfg) {
+ c.startOffset = off
+ }
+}
+
+// WithUseNumber enables the 'UseNumber' option on the json decoder, using
+// the json.Number type instead of assuming float64 for numbers. This is critical
+// if you have numbers that are larger than what can fit into the 53 bits of
+// an IEEE float64 mantissa and want to preserve its value.
+func WithUseNumber() FromJSONOption {
+ return func(c *fromJSONCfg) {
+ c.useNumber = true
+ }
+}
+
+// FromJSON creates an arrow.Array from a corresponding JSON stream and defined data type. If the types in the
+// json do not match the type provided, it will return errors. This is *not* the integration test format
+// and should not be used as such. This intended to be used by consumers more similarly to the current exposing of
+// the csv reader/writer. It also returns the input offset in the reader where it finished decoding since buffering
+// by the decoder could leave the reader's cursor past where the parsing finished if attempting to parse multiple json
+// arrays from one stream.
+//
+// All the Array types implement json.Marshaller and thus can be written to json
+// using the json.Marshal function
+//
+// The JSON provided must be formatted in one of two ways:
+//
+// Default: the top level of the json must be a list which matches the type specified exactly
+// Example: `[1, 2, 3, 4, 5]` for any integer type or `[[...], null, [], .....]` for a List type
+// Struct arrays are represented as a list of objects: `[{"foo": 1, "bar": "moo"}, {"foo": 5, "bar": "baz"}]`
+//
+// Using WithMultipleDocs:
+// If the JSON provided is multiple newline separated json documents, then use this option
+// and each json document will be treated as a single row of the array. This is most useful for record batches
+// and interacting with other processes that use json. For example:
+// `{"col1": 1, "col2": "row1", "col3": ...}\n{"col1": 2, "col2": "row2", "col3": ...}\n.....`
+//
+// Duration values get formatted upon marshalling as a string consisting of their numeric
+// value followed by the unit suffix such as "10s" for a value of 10 and unit of Seconds.
+// with "ms" for millisecond, "us" for microsecond, and "ns" for nanosecond as the suffixes.
+// Unmarshalling duration values is more permissive since it first tries to use Go's
+// time.ParseDuration function which means it allows values in the form 3h25m0.3s in addition
+// to the same values which are output.
+//
+// Interval types are marshalled / unmarshalled as follows:
+//
+// MonthInterval is marshalled as an object with the format:
+// { "months": #}
+// DayTimeInterval is marshalled using Go's regular marshalling of structs:
+// { "days": #, "milliseconds": # }
+// MonthDayNanoInterval values are marshalled the same as DayTime using Go's struct marshalling:
+// { "months": #, "days": #, "nanoseconds": # }
+//
+// Times use a format of HH:MM or HH:MM:SS[.zzz] where the fractions of a second cannot
+// exceed the precision allowed by the time unit, otherwise unmarshalling will error.
+//
+// # Dates use YYYY-MM-DD format
+//
+// Timestamps use RFC3339Nano format except without a timezone, all of the following are valid:
+//
+// YYYY-MM-DD
+// YYYY-MM-DD[T]HH
+// YYYY-MM-DD[T]HH:MM
+// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzzzz]
+//
+// The fractions of a second cannot exceed the precision allowed by the timeunit of the datatype.
+//
+// When processing structs as objects order of keys does not matter, but keys cannot be repeated.
+func FromJSON(mem memory.Allocator, dt arrow.DataType, r io.Reader, opts ...FromJSONOption) (arr arrow.Array, offset int64, err error) {
+	var cfg fromJSONCfg
+	for _, o := range opts {
+		o(&cfg)
+	}
+
+	if cfg.startOffset != 0 {
+		seeker, ok := r.(io.ReadSeeker)
+		if !ok {
+			return nil, 0, errors.New("using StartOffset option requires reader to be a ReadSeeker, cannot seek")
+		}
+		// surface seek failures rather than silently decoding from the wrong position
+		if _, err = seeker.Seek(cfg.startOffset, io.SeekStart); err != nil { return nil, 0, err }
+	}
+
+	bldr := NewBuilder(mem, dt)
+	defer bldr.Release()
+
+	dec := json.NewDecoder(r)
+	defer func() {
+		if errors.Is(err, io.EOF) {
+			err = fmt.Errorf("failed parsing json: %w", io.ErrUnexpectedEOF)
+		}
+	}()
+
+	if cfg.useNumber {
+		dec.UseNumber()
+	}
+
+	if !cfg.multiDocument {
+		t, err := dec.Token()
+		if err != nil {
+			return nil, dec.InputOffset(), err
+		}
+
+		if delim, ok := t.(json.Delim); !ok || delim != '[' {
+			return nil, dec.InputOffset(), fmt.Errorf("json doc must be an array, found %v", t)
+		}
+	}
+
+	if err = bldr.Unmarshal(dec); err != nil {
+		return nil, dec.InputOffset(), err
+	}
+
+	if !cfg.multiDocument {
+		// consume the last ']'
+		if _, err = dec.Token(); err != nil {
+			return nil, dec.InputOffset(), err
+		}
+	}
+
+	return bldr.NewArray(), dec.InputOffset(), nil
+}
+
+// RecordToStructArray constructs a struct array from the columns of the record batch
+// by referencing them, zero-copy.
+func RecordToStructArray(rec arrow.Record) *Struct {
+ cols := make([]arrow.ArrayData, rec.NumCols())
+ for i, c := range rec.Columns() {
+ cols[i] = c.Data()
+ }
+
+ data := NewData(arrow.StructOf(rec.Schema().Fields()...), int(rec.NumRows()), []*memory.Buffer{nil}, cols, 0, 0)
+ defer data.Release()
+
+ return NewStructData(data)
+}
+
+// RecordFromStructArray is a convenience function for converting a struct array into
+// a record batch without copying the data. If the passed in schema is nil, the fields
+// of the struct will be used to define the record batch. Otherwise the passed in
+// schema will be used to create the record batch. If passed in, the schema must match
+// the fields of the struct column.
+func RecordFromStructArray(in *Struct, schema *arrow.Schema) arrow.Record {
+ if schema == nil {
+ schema = arrow.NewSchema(in.DataType().(*arrow.StructType).Fields(), nil)
+ }
+
+ return NewRecord(schema, in.fields, int64(in.Len()))
+}
+
+// RecordFromJSON creates a record batch from JSON data. See array.FromJSON for the details
+// of formatting and logic.
+//
+// A record batch from JSON is equivalent to reading a struct array in from json and then
+// converting it to a record batch.
+func RecordFromJSON(mem memory.Allocator, schema *arrow.Schema, r io.Reader, opts ...FromJSONOption) (arrow.Record, int64, error) {
+ st := arrow.StructOf(schema.Fields()...)
+ arr, off, err := FromJSON(mem, st, r, opts...)
+ if err != nil {
+ return nil, off, err
+ }
+ defer arr.Release()
+
+ return RecordFromStructArray(arr.(*Struct), schema), off, nil
+}
+
+// RecordToJSON writes out the given record following the format of each row is a single object
+// on a single line of the output.
+func RecordToJSON(rec arrow.Record, w io.Writer) error {
+ enc := json.NewEncoder(w)
+
+ fields := rec.Schema().Fields()
+
+ cols := make(map[string]interface{})
+ for i := 0; int64(i) < rec.NumRows(); i++ {
+ for j, c := range rec.Columns() {
+ cols[fields[j].Name] = c.GetOneForMarshal(i)
+ }
+ if err := enc.Encode(cols); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func TableFromJSON(mem memory.Allocator, sc *arrow.Schema, recJSON []string, opt ...FromJSONOption) (arrow.Table, error) {
+ batches := make([]arrow.Record, len(recJSON))
+ for i, batchJSON := range recJSON {
+ batch, _, err := RecordFromJSON(mem, sc, strings.NewReader(batchJSON), opt...)
+ if err != nil {
+ return nil, err
+ }
+ defer batch.Release()
+ batches[i] = batch
+ }
+ return NewTableFromRecords(sc, batches), nil
+}
+
+func GetDictArrayData(mem memory.Allocator, valueType arrow.DataType, memoTable hashing.MemoTable, startOffset int) (*Data, error) {
+ dictLen := memoTable.Size() - startOffset
+ buffers := []*memory.Buffer{nil, nil}
+
+ buffers[1] = memory.NewResizableBuffer(mem)
+ defer buffers[1].Release()
+
+ switch tbl := memoTable.(type) {
+ case hashing.NumericMemoTable:
+ nbytes := tbl.TypeTraits().BytesRequired(dictLen)
+ buffers[1].Resize(nbytes)
+ tbl.WriteOutSubset(startOffset, buffers[1].Bytes())
+ case *hashing.BinaryMemoTable:
+ switch valueType.ID() {
+ case arrow.BINARY, arrow.STRING:
+ buffers = append(buffers, memory.NewResizableBuffer(mem))
+ defer buffers[2].Release()
+
+ buffers[1].Resize(arrow.Int32Traits.BytesRequired(dictLen + 1))
+ offsets := arrow.Int32Traits.CastFromBytes(buffers[1].Bytes())
+ tbl.CopyOffsetsSubset(startOffset, offsets)
+
+ valuesz := offsets[len(offsets)-1] - offsets[0]
+ buffers[2].Resize(int(valuesz))
+ tbl.CopyValuesSubset(startOffset, buffers[2].Bytes())
+ case arrow.LARGE_BINARY, arrow.LARGE_STRING:
+ buffers = append(buffers, memory.NewResizableBuffer(mem))
+ defer buffers[2].Release()
+
+ buffers[1].Resize(arrow.Int64Traits.BytesRequired(dictLen + 1))
+ offsets := arrow.Int64Traits.CastFromBytes(buffers[1].Bytes())
+ tbl.CopyLargeOffsetsSubset(startOffset, offsets)
+
+ valuesz := offsets[len(offsets)-1] - offsets[0]
+ buffers[2].Resize(int(valuesz))
+ tbl.CopyValuesSubset(startOffset, buffers[2].Bytes())
+ default: // fixed size
+ bw := int(bitutil.BytesForBits(int64(valueType.(arrow.FixedWidthDataType).BitWidth())))
+ buffers[1].Resize(dictLen * bw)
+ tbl.CopyFixedWidthValues(startOffset, bw, buffers[1].Bytes())
+ }
+ default:
+ return nil, fmt.Errorf("arrow/array: dictionary unifier unimplemented type: %s", valueType)
+ }
+
+ var nullcount int
+ if idx, ok := memoTable.GetNull(); ok && idx >= startOffset {
+ buffers[0] = memory.NewResizableBuffer(mem)
+ defer buffers[0].Release()
+ nullcount = 1
+ buffers[0].Resize(int(bitutil.BytesForBits(int64(dictLen))))
+ memory.Set(buffers[0].Bytes(), 0xFF)
+ bitutil.ClearBit(buffers[0].Bytes(), idx)
+ }
+
+ return NewData(valueType, dictLen, buffers, nil, nullcount, 0), nil
+}
+
+func DictArrayFromJSON(mem memory.Allocator, dt *arrow.DictionaryType, indicesJSON, dictJSON string) (arrow.Array, error) {
+ indices, _, err := FromJSON(mem, dt.IndexType, strings.NewReader(indicesJSON))
+ if err != nil {
+ return nil, err
+ }
+ defer indices.Release()
+
+ dict, _, err := FromJSON(mem, dt.ValueType, strings.NewReader(dictJSON))
+ if err != nil {
+ return nil, err
+ }
+ defer dict.Release()
+
+ return NewDictionaryArray(dt, indices, dict), nil
+}
+
+func ChunkedFromJSON(mem memory.Allocator, dt arrow.DataType, chunkStrs []string, opts ...FromJSONOption) (*arrow.Chunked, error) {
+ chunks := make([]arrow.Array, len(chunkStrs))
+ defer func() {
+ for _, c := range chunks {
+ if c != nil {
+ c.Release()
+ }
+ }
+ }()
+
+ var err error
+ for i, c := range chunkStrs {
+ chunks[i], _, err = FromJSON(mem, dt, strings.NewReader(c), opts...)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return arrow.NewChunked(dt, chunks), nil
+}
+
+func getMaxBufferLen(dt arrow.DataType, length int) int {
+ bufferLen := int(bitutil.BytesForBits(int64(length)))
+
+ maxOf := func(bl int) int {
+ if bl > bufferLen {
+ return bl
+ }
+ return bufferLen
+ }
+
+ switch dt := dt.(type) {
+ case *arrow.DictionaryType:
+ bufferLen = maxOf(getMaxBufferLen(dt.ValueType, length))
+ return maxOf(getMaxBufferLen(dt.IndexType, length))
+ case *arrow.FixedSizeBinaryType:
+ return maxOf(dt.ByteWidth * length)
+ case arrow.FixedWidthDataType:
+ return maxOf(int(bitutil.BytesForBits(int64(dt.BitWidth()))) * length)
+ case *arrow.StructType:
+ for _, f := range dt.Fields() {
+ bufferLen = maxOf(getMaxBufferLen(f.Type, length))
+ }
+ return bufferLen
+ case *arrow.SparseUnionType:
+ // type codes
+ bufferLen = maxOf(length)
+ // creates children of the same length of the union
+ for _, f := range dt.Fields() {
+ bufferLen = maxOf(getMaxBufferLen(f.Type, length))
+ }
+ return bufferLen
+ case *arrow.DenseUnionType:
+ // type codes
+ bufferLen = maxOf(length)
+ // offsets
+ bufferLen = maxOf(arrow.Int32SizeBytes * length)
+ // create children of length 1
+ for _, f := range dt.Fields() {
+ bufferLen = maxOf(getMaxBufferLen(f.Type, 1))
+ }
+ return bufferLen
+ case arrow.OffsetsDataType:
+ return maxOf(dt.OffsetTypeTraits().BytesRequired(length + 1))
+ case *arrow.FixedSizeListType:
+ return maxOf(getMaxBufferLen(dt.Elem(), int(dt.Len())*length))
+ case arrow.ExtensionType:
+ return maxOf(getMaxBufferLen(dt.StorageType(), length))
+ default:
+ panic(fmt.Errorf("arrow/array: arrayofnull not implemented for type %s", dt))
+ }
+}
+
+type nullArrayFactory struct {
+ mem memory.Allocator
+ dt arrow.DataType
+ len int
+ buf *memory.Buffer
+}
+
+func (n *nullArrayFactory) create() *Data {
+ if n.buf == nil {
+ bufLen := getMaxBufferLen(n.dt, n.len)
+ n.buf = memory.NewResizableBuffer(n.mem)
+ n.buf.Resize(bufLen)
+ defer n.buf.Release()
+ }
+
+ var (
+ dt = n.dt
+ bufs = []*memory.Buffer{memory.SliceBuffer(n.buf, 0, int(bitutil.BytesForBits(int64(n.len))))}
+ childData []arrow.ArrayData
+ dictData arrow.ArrayData
+ )
+ defer bufs[0].Release()
+
+ if ex, ok := dt.(arrow.ExtensionType); ok {
+ dt = ex.StorageType()
+ }
+
+ if nf, ok := dt.(arrow.NestedType); ok {
+ childData = make([]arrow.ArrayData, len(nf.Fields()))
+ }
+
+ switch dt := dt.(type) {
+ case *arrow.NullType:
+ case *arrow.DictionaryType:
+ bufs = append(bufs, n.buf)
+ arr := MakeArrayOfNull(n.mem, dt.ValueType, 0)
+ defer arr.Release()
+ dictData = arr.Data()
+ case arrow.FixedWidthDataType:
+ bufs = append(bufs, n.buf)
+ case arrow.BinaryDataType:
+ bufs = append(bufs, n.buf, n.buf)
+ case arrow.OffsetsDataType:
+ bufs = append(bufs, n.buf)
+ childData[0] = n.createChild(dt, 0, 0)
+ defer childData[0].Release()
+ case *arrow.FixedSizeListType:
+ childData[0] = n.createChild(dt, 0, n.len*int(dt.Len()))
+ defer childData[0].Release()
+ case *arrow.StructType:
+ for i := range dt.Fields() {
+ childData[i] = n.createChild(dt, i, n.len)
+ defer childData[i].Release()
+ }
+ case *arrow.RunEndEncodedType:
+ bldr := NewBuilder(n.mem, dt.RunEnds())
+ defer bldr.Release()
+
+ switch b := bldr.(type) {
+ case *Int16Builder:
+ b.Append(int16(n.len))
+ case *Int32Builder:
+ b.Append(int32(n.len))
+ case *Int64Builder:
+ b.Append(int64(n.len))
+ }
+
+ childData[0] = bldr.newData()
+ defer childData[0].Release()
+ childData[1] = n.createChild(dt.Encoded(), 1, 1)
+ defer childData[1].Release()
+ case arrow.UnionType:
+ bufs[0].Release()
+ bufs[0] = nil
+ bufs = append(bufs, n.buf)
+ // buffer is zeroed, but 0 may not be a valid type code
+ if dt.TypeCodes()[0] != 0 {
+ bufs[1] = memory.NewResizableBuffer(n.mem)
+ bufs[1].Resize(n.len)
+ defer bufs[1].Release()
+ memory.Set(bufs[1].Bytes(), byte(dt.TypeCodes()[0]))
+ }
+
+ // for sparse unions we create children with the same length
+ childLen := n.len
+ if dt.Mode() == arrow.DenseMode {
+ // for dense unions, offsets are all 0 and make children
+ // with length 1
+ bufs = append(bufs, n.buf)
+ childLen = 1
+ }
+ for i := range dt.Fields() {
+ childData[i] = n.createChild(dt, i, childLen)
+ defer childData[i].Release()
+ }
+ }
+
+ out := NewData(n.dt, n.len, bufs, childData, n.len, 0)
+ if dictData != nil {
+ out.SetDictionary(dictData)
+ }
+ return out
+}
+
+func (n *nullArrayFactory) createChild(dt arrow.DataType, i, length int) *Data {
+ childFactory := &nullArrayFactory{
+ mem: n.mem, dt: n.dt.(arrow.NestedType).Fields()[i].Type,
+ len: length, buf: n.buf}
+ return childFactory.create()
+}
+
+// MakeArrayOfNull creates an array of size length which is all null of the given data type.
+func MakeArrayOfNull(mem memory.Allocator, dt arrow.DataType, length int) arrow.Array {
+ if dt.ID() == arrow.NULL {
+ return NewNull(length)
+ }
+
+ data := (&nullArrayFactory{mem: mem, dt: dt, len: length}).create()
+ defer data.Release()
+ return MakeFromData(data)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go b/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go
new file mode 100644
index 000000000..466a93a68
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package arrio exposes functions to manipulate records, exposing and using
+// interfaces not unlike the ones defined in the stdlib io package.
+package arrio
+
+import (
+ "errors"
+ "io"
+
+ "github.com/apache/arrow/go/v14/arrow"
+)
+
+// Reader is the interface that wraps the Read method.
+type Reader interface {
+ // Read reads the current record from the underlying stream and an error, if any.
+ // When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF).
+ Read() (arrow.Record, error)
+}
+
+// ReaderAt is the interface that wraps the ReadAt method.
+type ReaderAt interface {
+ // ReadAt reads the i-th record from the underlying stream and an error, if any.
+ ReadAt(i int64) (arrow.Record, error)
+}
+
+// Writer is the interface that wraps the Write method.
+type Writer interface {
+ Write(rec arrow.Record) error
+}
+
+// Copy copies all the records available from src to dst.
+// Copy returns the number of records copied and the first error
+// encountered while copying, if any.
+//
+// A successful Copy returns err == nil, not err == EOF. Because Copy is
+// defined to read from src until EOF, it does not treat an EOF from Read as an
+// error to be reported.
+func Copy(dst Writer, src Reader) (n int64, err error) {
+ for {
+ rec, err := src.Read()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ return n, nil
+ }
+ return n, err
+ }
+ err = dst.Write(rec)
+ if err != nil {
+ return n, err
+ }
+ n++
+ }
+}
+
+// CopyN copies n records (or until an error) from src to dst. It returns the
+// number of records copied and the earliest error encountered while copying. On
+// return, written == n if and only if err == nil.
+func CopyN(dst Writer, src Reader, n int64) (written int64, err error) {
+	for ; written < n; written++ {
+		rec, err := src.Read()
+		if err != nil {
+			// Read failed before n records were copied (written < n
+			// always holds inside the loop), so every error — an
+			// io.EOF from a short source included — is returned
+			// as-is, keeping written == n if and only if err == nil.
+			return written, err
+		}
+		err = dst.Write(rec)
+		if err != nil {
+			return written, err
+		}
+	}
+
+	// The loop terminates normally only once written == n with no error
+	// encountered, so a completed copy reports success.
+	return written, nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile
new file mode 100644
index 000000000..12dd1d349
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# this converts rotate instructions from "ro[lr] <reg>" -> "ro[lr] <reg>, 1" for yasm compatibility
+PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/'
+
+C2GOASM=c2goasm
+CC=clang-11
+C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 \
+ -fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib
+ASM_FLAGS_AVX2=-mavx2 -mfma
+ASM_FLAGS_SSE4=-msse4
+ASM_FLAGS_BMI2=-mbmi2
+ASM_FLAGS_POPCNT=-mpopcnt
+
+C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \
+ -fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib
+
+GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go')
+ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go')
+
+.PHONEY: assembly
+
+INTEL_SOURCES := \
+ bitmap_ops_avx2_amd64.s bitmap_ops_sse4_amd64.s
+
+#
+# ARROW-15336: DO NOT add the assembly target for Arm64 (ARM_SOURCES) until c2goasm added the Arm64 support.
+# min_max_neon_arm64.s was generated by asm2plan9s.
+# And manually formatted it as the Arm64 Plan9.
+#
+
+assembly: $(INTEL_SOURCES)
+
+_lib/bitmap_ops_avx2_amd64.s: _lib/bitmap_ops.c
+ $(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/bitmap_ops_sse4_amd64.s: _lib/bitmap_ops.c
+ $(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+bitmap_ops_avx2_amd64.s: _lib/bitmap_ops_avx2_amd64.s
+ $(C2GOASM) -a -f $^ $@
+
+bitmap_ops_sse4_amd64.s: _lib/bitmap_ops_sse4_amd64.s
+ $(C2GOASM) -a -f $^ $@
+
+clean:
+ rm -f $(INTEL_SOURCES)
+ rm -f $(addprefix _lib/,$(INTEL_SOURCES))
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go
new file mode 100644
index 000000000..7db750a6d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go
@@ -0,0 +1,109 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutil
+
+func alignedBitAndGo(left, right, out []byte) {
+ var (
+ nbytes = len(out)
+ i = 0
+ )
+ if nbytes > uint64SizeBytes {
+ // case where we have enough bytes to operate on words
+ leftWords := bytesToUint64(left[i:])
+ rightWords := bytesToUint64(right[i:])
+ outWords := bytesToUint64(out[i:])
+
+ for w := range outWords {
+ outWords[w] = leftWords[w] & rightWords[w]
+ }
+
+ i += len(outWords) * uint64SizeBytes
+ }
+ // grab any remaining bytes that were fewer than a word
+ for ; i < nbytes; i++ {
+ out[i] = left[i] & right[i]
+ }
+}
+
+func alignedBitAndNotGo(left, right, out []byte) {
+ var (
+ nbytes = len(out)
+ i = 0
+ )
+ if nbytes > uint64SizeBytes {
+ // case where we have enough bytes to operate on words
+ leftWords := bytesToUint64(left[i:])
+ rightWords := bytesToUint64(right[i:])
+ outWords := bytesToUint64(out[i:])
+
+ for w := range outWords {
+ outWords[w] = leftWords[w] &^ rightWords[w]
+ }
+
+ i += len(outWords) * uint64SizeBytes
+ }
+ // grab any remaining bytes that were fewer than a word
+ for ; i < nbytes; i++ {
+ out[i] = left[i] &^ right[i]
+ }
+}
+
+func alignedBitOrGo(left, right, out []byte) {
+ var (
+ nbytes = len(out)
+ i = 0
+ )
+ if nbytes > uint64SizeBytes {
+ // case where we have enough bytes to operate on words
+ leftWords := bytesToUint64(left[i:])
+ rightWords := bytesToUint64(right[i:])
+ outWords := bytesToUint64(out[i:])
+
+ for w := range outWords {
+ outWords[w] = leftWords[w] | rightWords[w]
+ }
+
+ i += len(outWords) * uint64SizeBytes
+ }
+ // grab any remaining bytes that were fewer than a word
+ for ; i < nbytes; i++ {
+ out[i] = left[i] | right[i]
+ }
+}
+
+func alignedBitXorGo(left, right, out []byte) {
+ var (
+ nbytes = len(out)
+ i = 0
+ )
+ if nbytes > uint64SizeBytes {
+ // case where we have enough bytes to operate on words
+ leftWords := bytesToUint64(left[i:])
+ rightWords := bytesToUint64(right[i:])
+ outWords := bytesToUint64(out[i:])
+
+ for w := range outWords {
+ outWords[w] = leftWords[w] ^ rightWords[w]
+ }
+
+ i += len(outWords) * uint64SizeBytes
+ }
+ // grab any remaining bytes that were fewer than a word
+ for ; i < nbytes; i++ {
+ out[i] = left[i] ^ right[i]
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go
new file mode 100644
index 000000000..ad0fd674a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go
@@ -0,0 +1,41 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package bitutil
+
+import "golang.org/x/sys/cpu"
+
+func init() {
+ if cpu.X86.HasAVX2 {
+ bitAndOp.opAligned = bitmapAlignedAndAVX2
+ bitOrOp.opAligned = bitmapAlignedOrAVX2
+ bitAndNotOp.opAligned = bitmapAlignedAndNotAVX2
+ bitXorOp.opAligned = bitmapAlignedXorAVX2
+ } else if cpu.X86.HasSSE42 {
+ bitAndOp.opAligned = bitmapAlignedAndSSE4
+ bitOrOp.opAligned = bitmapAlignedOrSSE4
+ bitAndNotOp.opAligned = bitmapAlignedAndNotSSE4
+ bitXorOp.opAligned = bitmapAlignedXorSSE4
+ } else {
+ bitAndOp.opAligned = alignedBitAndGo
+ bitOrOp.opAligned = alignedBitOrGo
+ bitAndNotOp.opAligned = alignedBitAndNotGo
+ bitXorOp.opAligned = alignedBitXorGo
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go
new file mode 100644
index 000000000..28d95d84a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package bitutil
+
+func init() {
+ bitAndOp.opAligned = alignedBitAndGo
+ bitOrOp.opAligned = alignedBitOrGo
+ bitAndNotOp.opAligned = alignedBitAndNotGo
+ bitXorOp.opAligned = alignedBitXorGo
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go
new file mode 100644
index 000000000..1c01bd0f3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package bitutil
+
+import (
+ "unsafe"
+)
+
+//go:noescape
+func _bitmap_aligned_and_avx2(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedAndAVX2(left, right, out []byte) {
+ _bitmap_aligned_and_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
+
+//go:noescape
+func _bitmap_aligned_or_avx2(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedOrAVX2(left, right, out []byte) {
+ _bitmap_aligned_or_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
+
+//go:noescape
+func _bitmap_aligned_and_not_avx2(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedAndNotAVX2(left, right, out []byte) {
+ _bitmap_aligned_and_not_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
+
+//go:noescape
+func _bitmap_aligned_xor_avx2(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedXorAVX2(left, right, out []byte) {
+ _bitmap_aligned_xor_avx2(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s
new file mode 100644
index 000000000..00172e865
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s
@@ -0,0 +1,373 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_bitmap_aligned_and_avx2(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB0_12
+ LONG $0x7ff98348 // cmp rcx, 127
+ JA LBB0_7
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB0_3
+
+LBB0_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd3970f41 // seta r11b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ WORD $0x8441; BYTE $0xdb // test r11b, bl
+ JNE LBB0_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB0_3
+ WORD $0x8949; BYTE $0xca // mov r10, rcx
+ LONG $0x80e28349 // and r10, -128
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB0_10:
+ LONG $0x107ca1c4; WORD $0x0604 // vmovups ymm0, yword [rsi + r8]
+ LONG $0x107ca1c4; WORD $0x064c; BYTE $0x20 // vmovups ymm1, yword [rsi + r8 + 32]
+ LONG $0x107ca1c4; WORD $0x0654; BYTE $0x40 // vmovups ymm2, yword [rsi + r8 + 64]
+ LONG $0x107ca1c4; WORD $0x065c; BYTE $0x60 // vmovups ymm3, yword [rsi + r8 + 96]
+ LONG $0x547ca1c4; WORD $0x0704 // vandps ymm0, ymm0, yword [rdi + r8]
+ LONG $0x5474a1c4; WORD $0x074c; BYTE $0x20 // vandps ymm1, ymm1, yword [rdi + r8 + 32]
+ LONG $0x546ca1c4; WORD $0x0754; BYTE $0x40 // vandps ymm2, ymm2, yword [rdi + r8 + 64]
+ LONG $0x5464a1c4; WORD $0x075c; BYTE $0x60 // vandps ymm3, ymm3, yword [rdi + r8 + 96]
+ LONG $0x117ca1c4; WORD $0x0204 // vmovups yword [rdx + r8], ymm0
+ LONG $0x117ca1c4; WORD $0x024c; BYTE $0x20 // vmovups yword [rdx + r8 + 32], ymm1
+ LONG $0x117ca1c4; WORD $0x0254; BYTE $0x40 // vmovups yword [rdx + r8 + 64], ymm2
+ LONG $0x117ca1c4; WORD $0x025c; BYTE $0x60 // vmovups yword [rdx + r8 + 96], ymm3
+ LONG $0x80e88349 // sub r8, -128
+ WORD $0x394d; BYTE $0xc2 // cmp r10, r8
+ JNE LBB0_10
+ WORD $0x3949; BYTE $0xca // cmp r10, rcx
+ JE LBB0_12
+
+LBB0_3:
+ WORD $0x894d; BYTE $0xd0 // mov r8, r10
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ WORD $0x8949; BYTE $0xc9 // mov r9, rcx
+ LONG $0x03e18349 // and r9, 3
+ JE LBB0_5
+
+LBB0_4:
+ LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10]
+ LONG $0x17042242 // and al, byte [rdi + r10]
+ LONG $0x12048842 // mov byte [rdx + r10], al
+ LONG $0x01c28349 // add r10, 1
+ LONG $0xffc18349 // add r9, -1
+ JNE LBB0_4
+
+LBB0_5:
+ LONG $0x03f88349 // cmp r8, 3
+ JB LBB0_12
+
+LBB0_6:
+ LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10]
+ LONG $0x17042242 // and al, byte [rdi + r10]
+ LONG $0x12048842 // mov byte [rdx + r10], al
+ LONG $0x44b60f42; WORD $0x0116 // movzx eax, byte [rsi + r10 + 1]
+ LONG $0x17442242; BYTE $0x01 // and al, byte [rdi + r10 + 1]
+ LONG $0x12448842; BYTE $0x01 // mov byte [rdx + r10 + 1], al
+ LONG $0x44b60f42; WORD $0x0216 // movzx eax, byte [rsi + r10 + 2]
+ LONG $0x17442242; BYTE $0x02 // and al, byte [rdi + r10 + 2]
+ LONG $0x12448842; BYTE $0x02 // mov byte [rdx + r10 + 2], al
+ LONG $0x44b60f42; WORD $0x0316 // movzx eax, byte [rsi + r10 + 3]
+ LONG $0x17442242; BYTE $0x03 // and al, byte [rdi + r10 + 3]
+ LONG $0x12448842; BYTE $0x03 // mov byte [rdx + r10 + 3], al
+ LONG $0x04c28349 // add r10, 4
+ WORD $0x394c; BYTE $0xd1 // cmp rcx, r10
+ JNE LBB0_6
+
+LBB0_12:
+ VZEROUPPER
+ RET
+
+TEXT ·_bitmap_aligned_or_avx2(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB1_12
+ LONG $0x7ff98348 // cmp rcx, 127
+ JA LBB1_7
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB1_3
+
+LBB1_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd3970f41 // seta r11b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ WORD $0x8441; BYTE $0xdb // test r11b, bl
+ JNE LBB1_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB1_3
+ WORD $0x8949; BYTE $0xca // mov r10, rcx
+ LONG $0x80e28349 // and r10, -128
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB1_10:
+ LONG $0x107ca1c4; WORD $0x0604 // vmovups ymm0, yword [rsi + r8]
+ LONG $0x107ca1c4; WORD $0x064c; BYTE $0x20 // vmovups ymm1, yword [rsi + r8 + 32]
+ LONG $0x107ca1c4; WORD $0x0654; BYTE $0x40 // vmovups ymm2, yword [rsi + r8 + 64]
+ LONG $0x107ca1c4; WORD $0x065c; BYTE $0x60 // vmovups ymm3, yword [rsi + r8 + 96]
+ LONG $0x567ca1c4; WORD $0x0704 // vorps ymm0, ymm0, yword [rdi + r8]
+ LONG $0x5674a1c4; WORD $0x074c; BYTE $0x20 // vorps ymm1, ymm1, yword [rdi + r8 + 32]
+ LONG $0x566ca1c4; WORD $0x0754; BYTE $0x40 // vorps ymm2, ymm2, yword [rdi + r8 + 64]
+ LONG $0x5664a1c4; WORD $0x075c; BYTE $0x60 // vorps ymm3, ymm3, yword [rdi + r8 + 96]
+ LONG $0x117ca1c4; WORD $0x0204 // vmovups yword [rdx + r8], ymm0
+ LONG $0x117ca1c4; WORD $0x024c; BYTE $0x20 // vmovups yword [rdx + r8 + 32], ymm1
+ LONG $0x117ca1c4; WORD $0x0254; BYTE $0x40 // vmovups yword [rdx + r8 + 64], ymm2
+ LONG $0x117ca1c4; WORD $0x025c; BYTE $0x60 // vmovups yword [rdx + r8 + 96], ymm3
+ LONG $0x80e88349 // sub r8, -128
+ WORD $0x394d; BYTE $0xc2 // cmp r10, r8
+ JNE LBB1_10
+ WORD $0x3949; BYTE $0xca // cmp r10, rcx
+ JE LBB1_12
+
+LBB1_3:
+ WORD $0x894d; BYTE $0xd0 // mov r8, r10
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ WORD $0x8949; BYTE $0xc9 // mov r9, rcx
+ LONG $0x03e18349 // and r9, 3
+ JE LBB1_5
+
+LBB1_4:
+ LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10]
+ LONG $0x17040a42 // or al, byte [rdi + r10]
+ LONG $0x12048842 // mov byte [rdx + r10], al
+ LONG $0x01c28349 // add r10, 1
+ LONG $0xffc18349 // add r9, -1
+ JNE LBB1_4
+
+LBB1_5:
+ LONG $0x03f88349 // cmp r8, 3
+ JB LBB1_12
+
+LBB1_6:
+ LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10]
+ LONG $0x17040a42 // or al, byte [rdi + r10]
+ LONG $0x12048842 // mov byte [rdx + r10], al
+ LONG $0x44b60f42; WORD $0x0116 // movzx eax, byte [rsi + r10 + 1]
+ LONG $0x17440a42; BYTE $0x01 // or al, byte [rdi + r10 + 1]
+ LONG $0x12448842; BYTE $0x01 // mov byte [rdx + r10 + 1], al
+ LONG $0x44b60f42; WORD $0x0216 // movzx eax, byte [rsi + r10 + 2]
+ LONG $0x17440a42; BYTE $0x02 // or al, byte [rdi + r10 + 2]
+ LONG $0x12448842; BYTE $0x02 // mov byte [rdx + r10 + 2], al
+ LONG $0x44b60f42; WORD $0x0316 // movzx eax, byte [rsi + r10 + 3]
+ LONG $0x17440a42; BYTE $0x03 // or al, byte [rdi + r10 + 3]
+ LONG $0x12448842; BYTE $0x03 // mov byte [rdx + r10 + 3], al
+ LONG $0x04c28349 // add r10, 4
+ WORD $0x394c; BYTE $0xd1 // cmp rcx, r10
+ JNE LBB1_6
+
+LBB1_12:
+ VZEROUPPER
+ RET
+
+TEXT ·_bitmap_aligned_and_not_avx2(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB2_12
+ LONG $0x7ff98348 // cmp rcx, 127
+ JA LBB2_7
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+ JMP LBB2_3
+
+LBB2_7:
+ LONG $0x0a048d4c // lea r8, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd3970f41 // seta r11b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf8 // cmp r8, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd2970f41 // seta r10b
+ WORD $0x3949; BYTE $0xf0 // cmp r8, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+ WORD $0x8441; BYTE $0xdb // test r11b, bl
+ JNE LBB2_3
+ WORD $0x2045; BYTE $0xca // and r10b, r9b
+ JNE LBB2_3
+ WORD $0x8949; BYTE $0xc8 // mov r8, rcx
+ LONG $0x80e08349 // and r8, -128
+ WORD $0xc031 // xor eax, eax
+
+LBB2_10:
+ LONG $0x0410fcc5; BYTE $0x06 // vmovups ymm0, yword [rsi + rax]
+ LONG $0x4c10fcc5; WORD $0x2006 // vmovups ymm1, yword [rsi + rax + 32]
+ LONG $0x5410fcc5; WORD $0x4006 // vmovups ymm2, yword [rsi + rax + 64]
+ LONG $0x5c10fcc5; WORD $0x6006 // vmovups ymm3, yword [rsi + rax + 96]
+ LONG $0x0455fcc5; BYTE $0x07 // vandnps ymm0, ymm0, yword [rdi + rax]
+ LONG $0x4c55f4c5; WORD $0x2007 // vandnps ymm1, ymm1, yword [rdi + rax + 32]
+ LONG $0x5455ecc5; WORD $0x4007 // vandnps ymm2, ymm2, yword [rdi + rax + 64]
+ LONG $0x5c55e4c5; WORD $0x6007 // vandnps ymm3, ymm3, yword [rdi + rax + 96]
+ LONG $0x0411fcc5; BYTE $0x02 // vmovups yword [rdx + rax], ymm0
+ LONG $0x4c11fcc5; WORD $0x2002 // vmovups yword [rdx + rax + 32], ymm1
+ LONG $0x5411fcc5; WORD $0x4002 // vmovups yword [rdx + rax + 64], ymm2
+ LONG $0x5c11fcc5; WORD $0x6002 // vmovups yword [rdx + rax + 96], ymm3
+ LONG $0x80e88348 // sub rax, -128
+ WORD $0x3949; BYTE $0xc0 // cmp r8, rax
+ JNE LBB2_10
+ WORD $0x3949; BYTE $0xc8 // cmp r8, rcx
+ JE LBB2_12
+
+LBB2_3:
+ WORD $0x894d; BYTE $0xc1 // mov r9, r8
+ WORD $0xf749; BYTE $0xd1 // not r9
+ WORD $0xc1f6; BYTE $0x01 // test cl, 1
+ JE LBB2_5
+ LONG $0x06048a42 // mov al, byte [rsi + r8]
+ WORD $0xd0f6 // not al
+ LONG $0x07042242 // and al, byte [rdi + r8]
+ LONG $0x02048842 // mov byte [rdx + r8], al
+ LONG $0x01c88349 // or r8, 1
+
+LBB2_5:
+ WORD $0x0149; BYTE $0xc9 // add r9, rcx
+ JE LBB2_12
+
+LBB2_6:
+ LONG $0x04b60f42; BYTE $0x06 // movzx eax, byte [rsi + r8]
+ WORD $0xd0f6 // not al
+ LONG $0x07042242 // and al, byte [rdi + r8]
+ LONG $0x02048842 // mov byte [rdx + r8], al
+ LONG $0x44b60f42; WORD $0x0106 // movzx eax, byte [rsi + r8 + 1]
+ WORD $0xd0f6 // not al
+ LONG $0x07442242; BYTE $0x01 // and al, byte [rdi + r8 + 1]
+ LONG $0x02448842; BYTE $0x01 // mov byte [rdx + r8 + 1], al
+ LONG $0x02c08349 // add r8, 2
+ WORD $0x394c; BYTE $0xc1 // cmp rcx, r8
+ JNE LBB2_6
+
+LBB2_12:
+ VZEROUPPER
+ RET
+
+TEXT ·_bitmap_aligned_xor_avx2(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB3_12
+ LONG $0x7ff98348 // cmp rcx, 127
+ JA LBB3_7
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB3_3
+
+LBB3_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd3970f41 // seta r11b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ WORD $0x8441; BYTE $0xdb // test r11b, bl
+ JNE LBB3_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB3_3
+ WORD $0x8949; BYTE $0xca // mov r10, rcx
+ LONG $0x80e28349 // and r10, -128
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB3_10:
+ LONG $0x107ca1c4; WORD $0x0604 // vmovups ymm0, yword [rsi + r8]
+ LONG $0x107ca1c4; WORD $0x064c; BYTE $0x20 // vmovups ymm1, yword [rsi + r8 + 32]
+ LONG $0x107ca1c4; WORD $0x0654; BYTE $0x40 // vmovups ymm2, yword [rsi + r8 + 64]
+ LONG $0x107ca1c4; WORD $0x065c; BYTE $0x60 // vmovups ymm3, yword [rsi + r8 + 96]
+ LONG $0x577ca1c4; WORD $0x0704 // vxorps ymm0, ymm0, yword [rdi + r8]
+ LONG $0x5774a1c4; WORD $0x074c; BYTE $0x20 // vxorps ymm1, ymm1, yword [rdi + r8 + 32]
+ LONG $0x576ca1c4; WORD $0x0754; BYTE $0x40 // vxorps ymm2, ymm2, yword [rdi + r8 + 64]
+ LONG $0x5764a1c4; WORD $0x075c; BYTE $0x60 // vxorps ymm3, ymm3, yword [rdi + r8 + 96]
+ LONG $0x117ca1c4; WORD $0x0204 // vmovups yword [rdx + r8], ymm0
+ LONG $0x117ca1c4; WORD $0x024c; BYTE $0x20 // vmovups yword [rdx + r8 + 32], ymm1
+ LONG $0x117ca1c4; WORD $0x0254; BYTE $0x40 // vmovups yword [rdx + r8 + 64], ymm2
+ LONG $0x117ca1c4; WORD $0x025c; BYTE $0x60 // vmovups yword [rdx + r8 + 96], ymm3
+ LONG $0x80e88349 // sub r8, -128
+ WORD $0x394d; BYTE $0xc2 // cmp r10, r8
+ JNE LBB3_10
+ WORD $0x3949; BYTE $0xca // cmp r10, rcx
+ JE LBB3_12
+
+LBB3_3:
+ WORD $0x894d; BYTE $0xd0 // mov r8, r10
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ WORD $0x8949; BYTE $0xc9 // mov r9, rcx
+ LONG $0x03e18349 // and r9, 3
+ JE LBB3_5
+
+LBB3_4:
+ LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10]
+ LONG $0x17043242 // xor al, byte [rdi + r10]
+ LONG $0x12048842 // mov byte [rdx + r10], al
+ LONG $0x01c28349 // add r10, 1
+ LONG $0xffc18349 // add r9, -1
+ JNE LBB3_4
+
+LBB3_5:
+ LONG $0x03f88349 // cmp r8, 3
+ JB LBB3_12
+
+LBB3_6:
+ LONG $0x04b60f42; BYTE $0x16 // movzx eax, byte [rsi + r10]
+ LONG $0x17043242 // xor al, byte [rdi + r10]
+ LONG $0x12048842 // mov byte [rdx + r10], al
+ LONG $0x44b60f42; WORD $0x0116 // movzx eax, byte [rsi + r10 + 1]
+ LONG $0x17443242; BYTE $0x01 // xor al, byte [rdi + r10 + 1]
+ LONG $0x12448842; BYTE $0x01 // mov byte [rdx + r10 + 1], al
+ LONG $0x44b60f42; WORD $0x0216 // movzx eax, byte [rsi + r10 + 2]
+ LONG $0x17443242; BYTE $0x02 // xor al, byte [rdi + r10 + 2]
+ LONG $0x12448842; BYTE $0x02 // mov byte [rdx + r10 + 2], al
+ LONG $0x44b60f42; WORD $0x0316 // movzx eax, byte [rsi + r10 + 3]
+ LONG $0x17443242; BYTE $0x03 // xor al, byte [rdi + r10 + 3]
+ LONG $0x12448842; BYTE $0x03 // mov byte [rdx + r10 + 3], al
+ LONG $0x04c28349 // add r10, 4
+ WORD $0x394c; BYTE $0xd1 // cmp rcx, r10
+ JNE LBB3_6
+
+LBB3_12:
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go
new file mode 100644
index 000000000..e25347791
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build noasm
+// +build noasm
+
+package bitutil
+
+func init() {
+ bitAndOp.opAligned = alignedBitAndGo
+ bitOrOp.opAligned = alignedBitOrGo
+ bitAndNotOp.opAligned = alignedBitAndNotGo
+ bitXorOp.opAligned = alignedBitXorGo
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go
new file mode 100644
index 000000000..28d95d84a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package bitutil
+
+func init() {
+ bitAndOp.opAligned = alignedBitAndGo
+ bitOrOp.opAligned = alignedBitOrGo
+ bitAndNotOp.opAligned = alignedBitAndNotGo
+ bitXorOp.opAligned = alignedBitXorGo
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go
new file mode 100644
index 000000000..28d95d84a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package bitutil
+
+func init() {
+ bitAndOp.opAligned = alignedBitAndGo
+ bitOrOp.opAligned = alignedBitOrGo
+ bitAndNotOp.opAligned = alignedBitAndNotGo
+ bitXorOp.opAligned = alignedBitXorGo
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go
new file mode 100644
index 000000000..f16bce12b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package bitutil
+
+import (
+ "unsafe"
+)
+
+//go:noescape
+func _bitmap_aligned_and_sse4(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedAndSSE4(left, right, out []byte) {
+ _bitmap_aligned_and_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
+
+//go:noescape
+func _bitmap_aligned_or_sse4(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedOrSSE4(left, right, out []byte) {
+ _bitmap_aligned_or_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
+
+//go:noescape
+func _bitmap_aligned_and_not_sse4(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedAndNotSSE4(left, right, out []byte) {
+ _bitmap_aligned_and_not_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
+
+//go:noescape
+func _bitmap_aligned_xor_sse4(left, right, out unsafe.Pointer, length int64)
+
+func bitmapAlignedXorSSE4(left, right, out []byte) {
+ _bitmap_aligned_xor_sse4(unsafe.Pointer(&left[0]), unsafe.Pointer(&right[0]), unsafe.Pointer(&out[0]), int64(len(out)))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s
new file mode 100644
index 000000000..c15e18625
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s
@@ -0,0 +1,501 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_bitmap_aligned_and_sse4(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB0_16
+ LONG $0x1ff98348 // cmp rcx, 31
+ JA LBB0_7
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+
+LBB0_3:
+ WORD $0x894d; BYTE $0xd8 // mov r8, r11
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ WORD $0x8949; BYTE $0xc9 // mov r9, rcx
+ LONG $0x03e18349 // and r9, 3
+ JE LBB0_5
+
+LBB0_4:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ LONG $0x1f042242 // and al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x01c38349 // add r11, 1
+ LONG $0xffc18349 // add r9, -1
+ JNE LBB0_4
+
+LBB0_5:
+ LONG $0x03f88349 // cmp r8, 3
+ JB LBB0_16
+
+LBB0_6:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ LONG $0x1f042242 // and al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1]
+ LONG $0x1f442242; BYTE $0x01 // and al, byte [rdi + r11 + 1]
+ LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al
+ LONG $0x44b60f42; WORD $0x021e // movzx eax, byte [rsi + r11 + 2]
+ LONG $0x1f442242; BYTE $0x02 // and al, byte [rdi + r11 + 2]
+ LONG $0x1a448842; BYTE $0x02 // mov byte [rdx + r11 + 2], al
+ LONG $0x44b60f42; WORD $0x031e // movzx eax, byte [rsi + r11 + 3]
+ LONG $0x1f442242; BYTE $0x03 // and al, byte [rdi + r11 + 3]
+ LONG $0x1a448842; BYTE $0x03 // mov byte [rdx + r11 + 3], al
+ LONG $0x04c38349 // add r11, 4
+ WORD $0x394c; BYTE $0xd9 // cmp rcx, r11
+ JNE LBB0_6
+ JMP LBB0_16
+
+LBB0_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd2970f41 // seta r10b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0x8441; BYTE $0xda // test r10b, bl
+ JNE LBB0_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB0_3
+ WORD $0x8949; BYTE $0xcb // mov r11, rcx
+ LONG $0xe0e38349 // and r11, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc1 // mov r9, rax
+ LONG $0x05e9c149 // shr r9, 5
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB0_10
+ WORD $0x894d; BYTE $0xca // mov r10, r9
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB0_12:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x540f; BYTE $0xd0 // andps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x540f; BYTE $0xc1 // andps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+ LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32]
+ LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48]
+ LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32]
+ WORD $0x540f; BYTE $0xd0 // andps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48]
+ WORD $0x540f; BYTE $0xc1 // andps xmm0, xmm1
+ LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2
+ LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0
+ LONG $0x40c08349 // add r8, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB0_12
+ LONG $0x01c1f641 // test r9b, 1
+ JE LBB0_15
+
+LBB0_14:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x540f; BYTE $0xd0 // andps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x540f; BYTE $0xc1 // andps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+
+LBB0_15:
+ WORD $0x3949; BYTE $0xcb // cmp r11, rcx
+ JNE LBB0_3
+
+LBB0_16:
+ RET
+
+LBB0_10:
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+ LONG $0x01c1f641 // test r9b, 1
+ JNE LBB0_14
+ JMP LBB0_15
+
+TEXT ·_bitmap_aligned_or_sse4(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB1_16
+ LONG $0x1ff98348 // cmp rcx, 31
+ JA LBB1_7
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+
+LBB1_3:
+ WORD $0x894d; BYTE $0xd8 // mov r8, r11
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ WORD $0x8949; BYTE $0xc9 // mov r9, rcx
+ LONG $0x03e18349 // and r9, 3
+ JE LBB1_5
+
+LBB1_4:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ LONG $0x1f040a42 // or al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x01c38349 // add r11, 1
+ LONG $0xffc18349 // add r9, -1
+ JNE LBB1_4
+
+LBB1_5:
+ LONG $0x03f88349 // cmp r8, 3
+ JB LBB1_16
+
+LBB1_6:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ LONG $0x1f040a42 // or al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1]
+ LONG $0x1f440a42; BYTE $0x01 // or al, byte [rdi + r11 + 1]
+ LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al
+ LONG $0x44b60f42; WORD $0x021e // movzx eax, byte [rsi + r11 + 2]
+ LONG $0x1f440a42; BYTE $0x02 // or al, byte [rdi + r11 + 2]
+ LONG $0x1a448842; BYTE $0x02 // mov byte [rdx + r11 + 2], al
+ LONG $0x44b60f42; WORD $0x031e // movzx eax, byte [rsi + r11 + 3]
+ LONG $0x1f440a42; BYTE $0x03 // or al, byte [rdi + r11 + 3]
+ LONG $0x1a448842; BYTE $0x03 // mov byte [rdx + r11 + 3], al
+ LONG $0x04c38349 // add r11, 4
+ WORD $0x394c; BYTE $0xd9 // cmp rcx, r11
+ JNE LBB1_6
+ JMP LBB1_16
+
+LBB1_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd2970f41 // seta r10b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0x8441; BYTE $0xda // test r10b, bl
+ JNE LBB1_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB1_3
+ WORD $0x8949; BYTE $0xcb // mov r11, rcx
+ LONG $0xe0e38349 // and r11, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc1 // mov r9, rax
+ LONG $0x05e9c149 // shr r9, 5
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB1_10
+ WORD $0x894d; BYTE $0xca // mov r10, r9
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB1_12:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x560f; BYTE $0xd0 // orps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x560f; BYTE $0xc1 // orps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+ LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32]
+ LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48]
+ LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32]
+ WORD $0x560f; BYTE $0xd0 // orps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48]
+ WORD $0x560f; BYTE $0xc1 // orps xmm0, xmm1
+ LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2
+ LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0
+ LONG $0x40c08349 // add r8, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB1_12
+ LONG $0x01c1f641 // test r9b, 1
+ JE LBB1_15
+
+LBB1_14:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x560f; BYTE $0xd0 // orps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x560f; BYTE $0xc1 // orps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+
+LBB1_15:
+ WORD $0x3949; BYTE $0xcb // cmp r11, rcx
+ JNE LBB1_3
+
+LBB1_16:
+ RET
+
+LBB1_10:
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+ LONG $0x01c1f641 // test r9b, 1
+ JNE LBB1_14
+ JMP LBB1_15
+
+TEXT ·_bitmap_aligned_and_not_sse4(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB2_16
+ LONG $0x1ff98348 // cmp rcx, 31
+ JA LBB2_7
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+
+LBB2_3:
+ WORD $0x894d; BYTE $0xd8 // mov r8, r11
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0xc1f6; BYTE $0x01 // test cl, 1
+ JE LBB2_5
+ LONG $0x1e048a42 // mov al, byte [rsi + r11]
+ WORD $0xd0f6 // not al
+ LONG $0x1f042242 // and al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x01cb8349 // or r11, 1
+
+LBB2_5:
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ JE LBB2_16
+
+LBB2_6:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ WORD $0xd0f6 // not al
+ LONG $0x1f042242 // and al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1]
+ WORD $0xd0f6 // not al
+ LONG $0x1f442242; BYTE $0x01 // and al, byte [rdi + r11 + 1]
+ LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al
+ LONG $0x02c38349 // add r11, 2
+ WORD $0x394c; BYTE $0xd9 // cmp rcx, r11
+ JNE LBB2_6
+ JMP LBB2_16
+
+LBB2_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd2970f41 // seta r10b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0x8441; BYTE $0xda // test r10b, bl
+ JNE LBB2_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB2_3
+ WORD $0x8949; BYTE $0xcb // mov r11, rcx
+ LONG $0xe0e38349 // and r11, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc1 // mov r9, rax
+ LONG $0x05e9c149 // shr r9, 5
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB2_10
+ WORD $0x894d; BYTE $0xca // mov r10, r9
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB2_12:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x550f; BYTE $0xd0 // andnps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x550f; BYTE $0xc1 // andnps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+ LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32]
+ LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48]
+ LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32]
+ WORD $0x550f; BYTE $0xd0 // andnps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48]
+ WORD $0x550f; BYTE $0xc1 // andnps xmm0, xmm1
+ LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2
+ LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0
+ LONG $0x40c08349 // add r8, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB2_12
+ LONG $0x01c1f641 // test r9b, 1
+ JE LBB2_15
+
+LBB2_14:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x550f; BYTE $0xd0 // andnps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x550f; BYTE $0xc1 // andnps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+
+LBB2_15:
+ WORD $0x3949; BYTE $0xcb // cmp r11, rcx
+ JNE LBB2_3
+
+LBB2_16:
+ RET
+
+LBB2_10:
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+ LONG $0x01c1f641 // test r9b, 1
+ JNE LBB2_14
+ JMP LBB2_15
+
+TEXT ·_bitmap_aligned_xor_sse4(SB), $0-32
+
+ MOVQ left+0(FP), DI
+ MOVQ right+8(FP), SI
+ MOVQ out+16(FP), DX
+ MOVQ length+24(FP), CX
+
+ WORD $0x8548; BYTE $0xc9 // test rcx, rcx
+ JLE LBB3_16
+ LONG $0x1ff98348 // cmp rcx, 31
+ JA LBB3_7
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+
+LBB3_3:
+ WORD $0x894d; BYTE $0xd8 // mov r8, r11
+ WORD $0xf749; BYTE $0xd0 // not r8
+ WORD $0x0149; BYTE $0xc8 // add r8, rcx
+ WORD $0x8949; BYTE $0xc9 // mov r9, rcx
+ LONG $0x03e18349 // and r9, 3
+ JE LBB3_5
+
+LBB3_4:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ LONG $0x1f043242 // xor al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x01c38349 // add r11, 1
+ LONG $0xffc18349 // add r9, -1
+ JNE LBB3_4
+
+LBB3_5:
+ LONG $0x03f88349 // cmp r8, 3
+ JB LBB3_16
+
+LBB3_6:
+ LONG $0x04b60f42; BYTE $0x1e // movzx eax, byte [rsi + r11]
+ LONG $0x1f043242 // xor al, byte [rdi + r11]
+ LONG $0x1a048842 // mov byte [rdx + r11], al
+ LONG $0x44b60f42; WORD $0x011e // movzx eax, byte [rsi + r11 + 1]
+ LONG $0x1f443242; BYTE $0x01 // xor al, byte [rdi + r11 + 1]
+ LONG $0x1a448842; BYTE $0x01 // mov byte [rdx + r11 + 1], al
+ LONG $0x44b60f42; WORD $0x021e // movzx eax, byte [rsi + r11 + 2]
+ LONG $0x1f443242; BYTE $0x02 // xor al, byte [rdi + r11 + 2]
+ LONG $0x1a448842; BYTE $0x02 // mov byte [rdx + r11 + 2], al
+ LONG $0x44b60f42; WORD $0x031e // movzx eax, byte [rsi + r11 + 3]
+ LONG $0x1f443242; BYTE $0x03 // xor al, byte [rdi + r11 + 3]
+ LONG $0x1a448842; BYTE $0x03 // mov byte [rdx + r11 + 3], al
+ LONG $0x04c38349 // add r11, 4
+ WORD $0x394c; BYTE $0xd9 // cmp rcx, r11
+ JNE LBB3_6
+ JMP LBB3_16
+
+LBB3_7:
+ LONG $0x0a0c8d4c // lea r9, [rdx + rcx]
+ LONG $0x0f048d48 // lea rax, [rdi + rcx]
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd2970f41 // seta r10b
+ LONG $0x0e048d48 // lea rax, [rsi + rcx]
+ WORD $0x3949; BYTE $0xf9 // cmp r9, rdi
+ WORD $0x970f; BYTE $0xd3 // seta bl
+ WORD $0x3948; BYTE $0xd0 // cmp rax, rdx
+ LONG $0xd0970f41 // seta r8b
+ WORD $0x3949; BYTE $0xf1 // cmp r9, rsi
+ LONG $0xd1970f41 // seta r9b
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0x8441; BYTE $0xda // test r10b, bl
+ JNE LBB3_3
+ WORD $0x2045; BYTE $0xc8 // and r8b, r9b
+ JNE LBB3_3
+ WORD $0x8949; BYTE $0xcb // mov r11, rcx
+ LONG $0xe0e38349 // and r11, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc1 // mov r9, rax
+ LONG $0x05e9c149 // shr r9, 5
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB3_10
+ WORD $0x894d; BYTE $0xca // mov r10, r9
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB3_12:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x570f; BYTE $0xd0 // xorps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x570f; BYTE $0xc1 // xorps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+ LONG $0x44100f42; WORD $0x2007 // movups xmm0, oword [rdi + r8 + 32]
+ LONG $0x4c100f42; WORD $0x3007 // movups xmm1, oword [rdi + r8 + 48]
+ LONG $0x54100f42; WORD $0x2006 // movups xmm2, oword [rsi + r8 + 32]
+ WORD $0x570f; BYTE $0xd0 // xorps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x3006 // movups xmm0, oword [rsi + r8 + 48]
+ WORD $0x570f; BYTE $0xc1 // xorps xmm0, xmm1
+ LONG $0x54110f42; WORD $0x2002 // movups oword [rdx + r8 + 32], xmm2
+ LONG $0x44110f42; WORD $0x3002 // movups oword [rdx + r8 + 48], xmm0
+ LONG $0x40c08349 // add r8, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB3_12
+ LONG $0x01c1f641 // test r9b, 1
+ JE LBB3_15
+
+LBB3_14:
+ LONG $0x04100f42; BYTE $0x07 // movups xmm0, oword [rdi + r8]
+ LONG $0x4c100f42; WORD $0x1007 // movups xmm1, oword [rdi + r8 + 16]
+ LONG $0x14100f42; BYTE $0x06 // movups xmm2, oword [rsi + r8]
+ WORD $0x570f; BYTE $0xd0 // xorps xmm2, xmm0
+ LONG $0x44100f42; WORD $0x1006 // movups xmm0, oword [rsi + r8 + 16]
+ WORD $0x570f; BYTE $0xc1 // xorps xmm0, xmm1
+ LONG $0x14110f42; BYTE $0x02 // movups oword [rdx + r8], xmm2
+ LONG $0x44110f42; WORD $0x1002 // movups oword [rdx + r8 + 16], xmm0
+
+LBB3_15:
+ WORD $0x3949; BYTE $0xcb // cmp r11, rcx
+ JNE LBB3_3
+
+LBB3_16:
+ RET
+
+LBB3_10:
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+ LONG $0x01c1f641 // test r9b, 1
+ JNE LBB3_14
+ JMP LBB3_15
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go
new file mode 100644
index 000000000..2e9c0601c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go
@@ -0,0 +1,747 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutil
+
+import (
+ "bytes"
+ "errors"
+ "math/bits"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// BitmapReader is a simple bitmap reader for a byte slice.
+type BitmapReader struct {
+ bitmap []byte
+ pos int
+ len int
+
+ current byte
+ byteOffset int
+ bitOffset int
+}
+
+// NewBitmapReader creates and returns a new bitmap reader for the given bitmap
+func NewBitmapReader(bitmap []byte, offset, length int) *BitmapReader {
+ curbyte := byte(0)
+ if length > 0 && bitmap != nil {
+ curbyte = bitmap[offset/8]
+ }
+ return &BitmapReader{
+ bitmap: bitmap,
+ byteOffset: offset / 8,
+ bitOffset: offset % 8,
+ current: curbyte,
+ len: length,
+ }
+}
+
+// Set returns true if the current bit is set
+func (b *BitmapReader) Set() bool {
+ return (b.current & (1 << b.bitOffset)) != 0
+}
+
+// NotSet returns true if the current bit is not set
+func (b *BitmapReader) NotSet() bool {
+ return (b.current & (1 << b.bitOffset)) == 0
+}
+
+// Next advances the reader to the next bit in the bitmap.
+func (b *BitmapReader) Next() {
+ b.bitOffset++
+ b.pos++
+ if b.bitOffset == 8 {
+ b.bitOffset = 0
+ b.byteOffset++
+ if b.pos < b.len {
+ b.current = b.bitmap[int(b.byteOffset)]
+ }
+ }
+}
+
+// Pos returns the current bit position in the bitmap that the reader is looking at
+func (b *BitmapReader) Pos() int { return b.pos }
+
+// Len returns the total number of bits in the bitmap
+func (b *BitmapReader) Len() int { return b.len }
+
+// BitmapWriter is a simple writer for writing bitmaps to byte slices
+type BitmapWriter struct {
+ buf []byte
+ pos int
+ length int
+
+ curByte uint8
+ bitMask uint8
+ byteOffset int
+}
+
+// NewBitmapWriter returns a sequential bitwise writer that preserves surrounding
+// bit values as it writes.
+func NewBitmapWriter(bitmap []byte, start, length int) *BitmapWriter {
+ ret := &BitmapWriter{
+ buf: bitmap,
+ length: length,
+ byteOffset: start / 8,
+ bitMask: BitMask[start%8],
+ }
+ if length > 0 {
+ ret.curByte = bitmap[int(ret.byteOffset)]
+ }
+ return ret
+}
+
+// Reset resets the position and view of the slice to restart writing a bitmap
+// to the same byte slice.
+func (b *BitmapWriter) Reset(start, length int) {
+ b.pos = 0
+ b.byteOffset = start / 8
+ b.bitMask = BitMask[start%8]
+ b.length = length
+ if b.length > 0 {
+ b.curByte = b.buf[int(b.byteOffset)]
+ }
+}
+
+func (b *BitmapWriter) Pos() int { return b.pos }
+func (b *BitmapWriter) Set() { b.curByte |= b.bitMask }
+func (b *BitmapWriter) Clear() { b.curByte &= ^b.bitMask }
+
+// Next increments the writer to the next bit for writing.
+func (b *BitmapWriter) Next() {
+ b.bitMask = b.bitMask << 1
+ b.pos++
+ if b.bitMask == 0 {
+ b.bitMask = 0x01
+ b.buf[b.byteOffset] = b.curByte
+ b.byteOffset++
+ if b.pos < b.length {
+ b.curByte = b.buf[int(b.byteOffset)]
+ }
+ }
+}
+
+// AppendBools writes a series of booleans to the bitmapwriter and returns
+// the number of remaining bytes left in the buffer for writing.
+func (b *BitmapWriter) AppendBools(in []bool) int {
+ space := min(b.length-b.pos, len(in))
+ if space == 0 {
+ return 0
+ }
+
+ bitOffset := bits.TrailingZeros32(uint32(b.bitMask))
+ // location that the first byte needs to be written to for appending
+ appslice := b.buf[int(b.byteOffset) : b.byteOffset+int(BytesForBits(int64(bitOffset+space)))]
+ // update everything but curByte
+ appslice[0] = b.curByte
+ for i, b := range in[:space] {
+ if b {
+ SetBit(appslice, i+bitOffset)
+ } else {
+ ClearBit(appslice, i+bitOffset)
+ }
+ }
+
+ b.pos += space
+ b.bitMask = BitMask[(bitOffset+space)%8]
+ b.byteOffset += (bitOffset + space) / 8
+ b.curByte = appslice[len(appslice)-1]
+
+ return space
+}
+
+// Finish flushes the final byte out to the byteslice in case it was not already
+// on a byte aligned boundary.
+func (b *BitmapWriter) Finish() {
+ if b.length > 0 && (b.bitMask != 0x01 || b.pos < b.length) {
+ b.buf[int(b.byteOffset)] = b.curByte
+ }
+}
+
+// BitmapWordReader is a reader for bitmaps that reads a word at a time (a word being an 8 byte uint64)
+// and then provides functions to grab the individual trailing bytes after the last word
+type BitmapWordReader struct {
+ bitmap []byte
+ offset int
+ nwords int
+ trailingBits int
+ trailingBytes int
+ curword uint64
+}
+
+// NewBitmapWordReader sets up a word reader, calculates the number of trailing bits and
+// number of trailing bytes, along with the number of words.
+func NewBitmapWordReader(bitmap []byte, offset, length int) *BitmapWordReader {
+ bitoffset := offset % 8
+ byteOffset := offset / 8
+ bm := &BitmapWordReader{
+ offset: bitoffset,
+ bitmap: bitmap[byteOffset : byteOffset+int(BytesForBits(int64(bitoffset+length)))],
+ // decrement wordcount by 1 as we may touch two adjacent words in one iteration
+ nwords: length/int(unsafe.Sizeof(uint64(0))*8) - 1,
+ }
+ if bm.nwords < 0 {
+ bm.nwords = 0
+ }
+ bm.trailingBits = length - bm.nwords*int(unsafe.Sizeof(uint64(0)))*8
+ bm.trailingBytes = int(BytesForBits(int64(bm.trailingBits)))
+
+ if bm.nwords > 0 {
+ bm.curword = toFromLEFunc(endian.Native.Uint64(bm.bitmap))
+ } else if length > 0 {
+ setLSB(&bm.curword, bm.bitmap[0])
+ }
+ return bm
+}
+
+// NextWord returns the next full word read from the bitmap, should not be called
+// if Words() is 0 as it will step outside of the bounds of the bitmap slice and panic.
+//
+// We don't perform the bounds checking in order to improve performance.
+func (bm *BitmapWordReader) NextWord() uint64 {
+ bm.bitmap = bm.bitmap[unsafe.Sizeof(bm.curword):]
+ word := bm.curword
+ nextWord := toFromLEFunc(endian.Native.Uint64(bm.bitmap))
+ if bm.offset != 0 {
+ // combine two adjacent words into one word
+ // |<------ next ----->|<---- current ---->|
+ // +-------------+-----+-------------+-----+
+ // | --- | A | B | --- |
+ // +-------------+-----+-------------+-----+
+ // | | offset
+ // v v
+ // +-----+-------------+
+ // | A | B |
+ // +-----+-------------+
+ // |<------ word ----->|
+ word >>= uint64(bm.offset)
+ word |= nextWord << (int64(unsafe.Sizeof(uint64(0))*8) - int64(bm.offset))
+ }
+ bm.curword = nextWord
+ return word
+}
+
+// NextTrailingByte returns the next trailing byte of the bitmap after the last word
+// along with the number of valid bits in that byte. When validBits < 8, that
+// is the last byte.
+//
+// If the bitmap ends on a byte alignment, then the last byte can also return 8 valid bits.
+// Thus the TrailingBytes function should be used to know how many trailing bytes to read.
+func (bm *BitmapWordReader) NextTrailingByte() (val byte, validBits int) {
+ debug.Assert(bm.trailingBits > 0, "next trailing byte called with no trailing bits")
+
+ if bm.trailingBits <= 8 {
+ // last byte
+ validBits = bm.trailingBits
+ bm.trailingBits = 0
+ rdr := NewBitmapReader(bm.bitmap, bm.offset, validBits)
+ for i := 0; i < validBits; i++ {
+ val >>= 1
+ if rdr.Set() {
+ val |= 0x80
+ }
+ rdr.Next()
+ }
+ val >>= (8 - validBits)
+ return
+ }
+
+ bm.bitmap = bm.bitmap[1:]
+ nextByte := bm.bitmap[0]
+ val = getLSB(bm.curword)
+ if bm.offset != 0 {
+ val >>= byte(bm.offset)
+ val |= nextByte << (8 - bm.offset)
+ }
+ setLSB(&bm.curword, nextByte)
+ bm.trailingBits -= 8
+ bm.trailingBytes--
+ validBits = 8
+ return
+}
+
+func (bm *BitmapWordReader) Words() int { return bm.nwords }
+func (bm *BitmapWordReader) TrailingBytes() int { return bm.trailingBytes }
+
+// BitmapWordWriter is a bitmap writer for writing a full word at a time (a word being
+// a uint64). After the last full word is written, PutNextTrailingByte can be used to
+// write the remaining trailing bytes.
+type BitmapWordWriter struct {
+ bitmap []byte
+ offset int
+ len int
+
+ bitMask uint64
+ currentWord uint64
+}
+
+// NewBitmapWordWriter initializes a new bitmap word writer which will start writing
+// into the byte slice at bit offset start, expecting to write len bits.
+func NewBitmapWordWriter(bitmap []byte, start, len int) *BitmapWordWriter {
+ ret := &BitmapWordWriter{
+ bitmap: bitmap[start/8:],
+ len: len,
+ offset: start % 8,
+ bitMask: (uint64(1) << uint64(start%8)) - 1,
+ }
+
+ if ret.offset != 0 {
+ if ret.len >= int(unsafe.Sizeof(uint64(0))*8) {
+ ret.currentWord = toFromLEFunc(endian.Native.Uint64(ret.bitmap))
+ } else if ret.len > 0 {
+ setLSB(&ret.currentWord, ret.bitmap[0])
+ }
+ }
+ return ret
+}
+
+// PutNextWord writes the given word to the bitmap, potentially splitting across
+// two adjacent words.
+func (bm *BitmapWordWriter) PutNextWord(word uint64) {
+ sz := int(unsafe.Sizeof(word))
+ if bm.offset != 0 {
+ // split one word into two adjacent words, don't touch unused bits
+ // |<------ word ----->|
+ // +-----+-------------+
+ // | A | B |
+ // +-----+-------------+
+ // | |
+ // v v offset
+ // +-------------+-----+-------------+-----+
+ // | --- | A | B | --- |
+ // +-------------+-----+-------------+-----+
+ // |<------ next ----->|<---- current ---->|
+ word = (word << uint64(bm.offset)) | (word >> (int64(sz*8) - int64(bm.offset)))
+ next := toFromLEFunc(endian.Native.Uint64(bm.bitmap[sz:]))
+ bm.currentWord = (bm.currentWord & bm.bitMask) | (word &^ bm.bitMask)
+ next = (next &^ bm.bitMask) | (word & bm.bitMask)
+ endian.Native.PutUint64(bm.bitmap, toFromLEFunc(bm.currentWord))
+ endian.Native.PutUint64(bm.bitmap[sz:], toFromLEFunc(next))
+ bm.currentWord = next
+ } else {
+ endian.Native.PutUint64(bm.bitmap, toFromLEFunc(word))
+ }
+ bm.bitmap = bm.bitmap[sz:]
+}
+
+// PutNextTrailingByte writes the number of bits indicated by validBits from b to
+// the bitmap.
+func (bm *BitmapWordWriter) PutNextTrailingByte(b byte, validBits int) {
+ curbyte := getLSB(bm.currentWord)
+ if validBits == 8 {
+ if bm.offset != 0 {
+ b = (b << bm.offset) | (b >> (8 - bm.offset))
+ next := bm.bitmap[1]
+ curbyte = (curbyte & byte(bm.bitMask)) | (b &^ byte(bm.bitMask))
+ next = (next &^ byte(bm.bitMask)) | (b & byte(bm.bitMask))
+ bm.bitmap[0] = curbyte
+ bm.bitmap[1] = next
+ bm.currentWord = uint64(next)
+ } else {
+ bm.bitmap[0] = b
+ }
+ bm.bitmap = bm.bitmap[1:]
+ } else {
+ debug.Assert(validBits > 0 && validBits < 8, "invalid valid bits in bitmap word writer")
+ debug.Assert(BytesForBits(int64(bm.offset+validBits)) <= int64(len(bm.bitmap)), "writing trailiing byte outside of bounds of bitmap")
+ wr := NewBitmapWriter(bm.bitmap, int(bm.offset), validBits)
+ for i := 0; i < validBits; i++ {
+ if b&0x01 != 0 {
+ wr.Set()
+ } else {
+ wr.Clear()
+ }
+ wr.Next()
+ b >>= 1
+ }
+ wr.Finish()
+ }
+}
+
+type transferMode int8
+
+const (
+ transferCopy transferMode = iota
+ transferInvert
+)
+
+func transferBitmap(mode transferMode, src []byte, srcOffset, length int, dst []byte, dstOffset int) {
+ if length == 0 {
+ // if there's nothing to write, end early.
+ return
+ }
+
+ bitOffset := srcOffset % 8
+ destBitOffset := dstOffset % 8
+
+ // slow path, one of the bitmaps are not byte aligned.
+ if bitOffset != 0 || destBitOffset != 0 {
+ rdr := NewBitmapWordReader(src, srcOffset, length)
+ wr := NewBitmapWordWriter(dst, dstOffset, length)
+
+ nwords := rdr.Words()
+ for nwords > 0 {
+ nwords--
+ if mode == transferInvert {
+ wr.PutNextWord(^rdr.NextWord())
+ } else {
+ wr.PutNextWord(rdr.NextWord())
+ }
+ }
+ nbytes := rdr.TrailingBytes()
+ for nbytes > 0 {
+ nbytes--
+ bt, validBits := rdr.NextTrailingByte()
+ if mode == transferInvert {
+ bt = ^bt
+ }
+ wr.PutNextTrailingByte(bt, validBits)
+ }
+ return
+ }
+
+ // fast path, both are starting with byte-aligned bitmaps
+ nbytes := int(BytesForBits(int64(length)))
+
+ // shift by its byte offset
+ src = src[srcOffset/8:]
+ dst = dst[dstOffset/8:]
+
+ // Take care of the trailing bits in the last byte
+ // E.g., if trailing_bits = 5, last byte should be
+ // - low 3 bits: new bits from last byte of data buffer
+ // - high 5 bits: old bits from last byte of dest buffer
+ trailingBits := nbytes*8 - length
+ trailMask := byte(uint(1)<<(8-trailingBits)) - 1
+ var lastData byte
+ if mode == transferInvert {
+ for i, b := range src[:nbytes-1] {
+ dst[i] = ^b
+ }
+ lastData = ^src[nbytes-1]
+ } else {
+ copy(dst, src[:nbytes-1])
+ lastData = src[nbytes-1]
+ }
+
+ dst[nbytes-1] &= ^trailMask
+ dst[nbytes-1] |= lastData & trailMask
+}
+
+// CopyBitmap copies the bitmap indicated by src, starting at bit offset srcOffset,
+// and copying length bits into dst, starting at bit offset dstOffset.
+func CopyBitmap(src []byte, srcOffset, length int, dst []byte, dstOffset int) {
+ transferBitmap(transferCopy, src, srcOffset, length, dst, dstOffset)
+}
+
+// InvertBitmap copies a bit range of a bitmap, inverting it as it copies
+// over into the destination.
+func InvertBitmap(src []byte, srcOffset, length int, dst []byte, dstOffset int) {
+ transferBitmap(transferInvert, src, srcOffset, length, dst, dstOffset)
+}
+
+type bitOp struct {
+ opWord func(uint64, uint64) uint64
+ opByte func(byte, byte) byte
+ opAligned func(l, r, o []byte)
+}
+
+var (
+ bitAndOp = bitOp{
+ opWord: func(l, r uint64) uint64 { return l & r },
+ opByte: func(l, r byte) byte { return l & r },
+ }
+ bitOrOp = bitOp{
+ opWord: func(l, r uint64) uint64 { return l | r },
+ opByte: func(l, r byte) byte { return l | r },
+ }
+ bitAndNotOp = bitOp{
+ opWord: func(l, r uint64) uint64 { return l &^ r },
+ opByte: func(l, r byte) byte { return l &^ r },
+ }
+ bitXorOp = bitOp{
+ opWord: func(l, r uint64) uint64 { return l ^ r },
+ opByte: func(l, r byte) byte { return l ^ r },
+ }
+)
+
+func alignedBitmapOp(op bitOp, left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) {
+ debug.Assert(lOffset%8 == rOffset%8, "aligned bitmap op called with unaligned offsets")
+ debug.Assert(lOffset%8 == outOffset%8, "aligned bitmap op called with unaligned output offset")
+
+ nbytes := BytesForBits(length + lOffset%8)
+ left = left[lOffset/8:]
+ right = right[rOffset/8:]
+ out = out[outOffset/8:]
+ endMask := (lOffset + length%8)
+ switch nbytes {
+ case 0:
+ return
+ case 1: // everything within a single byte
+ // (length+lOffset%8) <= 8
+ mask := PrecedingBitmask[lOffset%8]
+ if endMask != 0 {
+ mask |= TrailingBitmask[(lOffset+length)%8]
+ }
+ out[0] = (out[0] & mask) | (op.opByte(left[0], right[0]) &^ mask)
+ case 2: // don't send zero length to opAligned
+ firstByteMask := PrecedingBitmask[lOffset%8]
+ out[0] = (out[0] & firstByteMask) | (op.opByte(left[0], right[0]) &^ firstByteMask)
+ lastByteMask := byte(0)
+ if endMask != 0 {
+ lastByteMask = TrailingBitmask[(lOffset+length)%8]
+ }
+ out[1] = (out[1] & lastByteMask) | (op.opByte(left[1], right[1]) &^ lastByteMask)
+ default:
+ firstByteMask := PrecedingBitmask[lOffset%8]
+ out[0] = (out[0] & firstByteMask) | (op.opByte(left[0], right[0]) &^ firstByteMask)
+
+ op.opAligned(left[1:nbytes-1], right[1:nbytes-1], out[1:nbytes-1])
+
+ lastByteMask := byte(0)
+ if endMask != 0 {
+ lastByteMask = TrailingBitmask[(lOffset+length)%8]
+ }
+ out[nbytes-1] = (out[nbytes-1] & lastByteMask) | (op.opByte(left[nbytes-1], right[nbytes-1]) &^ lastByteMask)
+ }
+}
+
+func unalignedBitmapOp(op bitOp, left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) {
+ leftRdr := NewBitmapWordReader(left, int(lOffset), int(length))
+ rightRdr := NewBitmapWordReader(right, int(rOffset), int(length))
+ writer := NewBitmapWordWriter(out, int(outOffset), int(length))
+
+ for nwords := leftRdr.Words(); nwords > 0; nwords-- {
+ writer.PutNextWord(op.opWord(leftRdr.NextWord(), rightRdr.NextWord()))
+ }
+ for nbytes := leftRdr.TrailingBytes(); nbytes > 0; nbytes-- {
+ leftByte, leftValid := leftRdr.NextTrailingByte()
+ rightByte, rightValid := rightRdr.NextTrailingByte()
+ debug.Assert(leftValid == rightValid, "unexpected mismatch of valid bits")
+ writer.PutNextTrailingByte(op.opByte(leftByte, rightByte), leftValid)
+ }
+}
+
+func BitmapOp(op bitOp, left, right []byte, lOffset, rOffset int64, out []byte, outOffset, length int64) {
+ if (outOffset%8 == lOffset%8) && (outOffset%8 == rOffset%8) {
+ // fastcase!
+ alignedBitmapOp(op, left, right, lOffset, rOffset, out, outOffset, length)
+ } else {
+ unalignedBitmapOp(op, left, right, lOffset, rOffset, out, outOffset, length)
+ }
+}
+
+func BitmapOpAlloc(mem memory.Allocator, op bitOp, left, right []byte, lOffset, rOffset int64, length int64, outOffset int64) *memory.Buffer {
+ bits := length + outOffset
+ buf := memory.NewResizableBuffer(mem)
+ buf.Resize(int(BytesForBits(bits)))
+ BitmapOp(op, left, right, lOffset, rOffset, buf.Bytes(), outOffset, length)
+ return buf
+}
+
+func BitmapAnd(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) {
+ BitmapOp(bitAndOp, left, right, lOffset, rOffset, out, outOffset, length)
+}
+
+func BitmapOr(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) {
+ BitmapOp(bitOrOp, left, right, lOffset, rOffset, out, outOffset, length)
+}
+
+func BitmapAndAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer {
+ return BitmapOpAlloc(mem, bitAndOp, left, right, lOffset, rOffset, length, outOffset)
+}
+
+func BitmapOrAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer {
+ return BitmapOpAlloc(mem, bitOrOp, left, right, lOffset, rOffset, length, outOffset)
+}
+
+func BitmapAndNot(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) {
+ BitmapOp(bitAndNotOp, left, right, lOffset, rOffset, out, outOffset, length)
+}
+
+func BitmapAndNotAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer {
+ return BitmapOpAlloc(mem, bitAndNotOp, left, right, lOffset, rOffset, length, outOffset)
+}
+
+func BitmapXor(left, right []byte, lOffset, rOffset int64, out []byte, outOffset int64, length int64) {
+ BitmapOp(bitXorOp, left, right, lOffset, rOffset, out, outOffset, length)
+}
+
+func BitmapXorAlloc(mem memory.Allocator, left, right []byte, lOffset, rOffset int64, length, outOffset int64) *memory.Buffer {
+ return BitmapOpAlloc(mem, bitXorOp, left, right, lOffset, rOffset, length, outOffset)
+}
+
+func BitmapEquals(left, right []byte, lOffset, rOffset int64, length int64) bool {
+ if lOffset%8 == 0 && rOffset%8 == 0 {
+ // byte aligned, fast path, can use bytes.Equal (memcmp)
+ byteLen := length / 8
+ lStart := lOffset / 8
+ rStart := rOffset / 8
+ if !bytes.Equal(left[lStart:lStart+byteLen], right[rStart:rStart+byteLen]) {
+ return false
+ }
+
+ // check trailing bits
+ for i := (length / 8) * 8; i < length; i++ {
+ if BitIsSet(left, int(lOffset+i)) != BitIsSet(right, int(rOffset+i)) {
+ return false
+ }
+ }
+ return true
+ }
+
+ lrdr := NewBitmapWordReader(left, int(lOffset), int(length))
+ rrdr := NewBitmapWordReader(right, int(rOffset), int(length))
+
+ nwords := lrdr.Words()
+ for nwords > 0 {
+ nwords--
+ if lrdr.NextWord() != rrdr.NextWord() {
+ return false
+ }
+ }
+
+ nbytes := lrdr.TrailingBytes()
+ for nbytes > 0 {
+ nbytes--
+ lbt, _ := lrdr.NextTrailingByte()
+ rbt, _ := rrdr.NextTrailingByte()
+ if lbt != rbt {
+ return false
+ }
+ }
+ return true
+}
+
+// OptionalBitIndexer is a convenience wrapper for getting bits from
+// a bitmap which may or may not be nil.
+type OptionalBitIndexer struct {
+ Bitmap []byte
+ Offset int
+}
+
+func (b *OptionalBitIndexer) GetBit(i int) bool {
+ return b.Bitmap == nil || BitIsSet(b.Bitmap, b.Offset+i)
+}
+
+type Bitmap struct {
+ Data []byte
+ Offset, Len int64
+}
+
+func bitLength(bitmaps []Bitmap) (int64, error) {
+ for _, b := range bitmaps[1:] {
+ if b.Len != bitmaps[0].Len {
+ return -1, errors.New("bitmaps must be same length")
+ }
+ }
+ return bitmaps[0].Len, nil
+}
+
+func runVisitWordsAndWriteLoop(bitLen int64, rdrs []*BitmapWordReader, wrs []*BitmapWordWriter, visitor func(in, out []uint64)) {
+ const bitWidth int64 = int64(uint64SizeBits)
+
+ visited := make([]uint64, len(rdrs))
+ output := make([]uint64, len(wrs))
+
+ // every reader will have same number of words, since they are same
+ // length'ed. This will be inefficient in some cases. When there's
+ // offsets beyond the Word boundary, every word would have to be
+ // created from 2 adjoining words
+ nwords := int64(rdrs[0].Words())
+ bitLen -= nwords * bitWidth
+ for nwords > 0 {
+ nwords--
+ for i := range visited {
+ visited[i] = rdrs[i].NextWord()
+ }
+ visitor(visited, output)
+ for i := range output {
+ wrs[i].PutNextWord(output[i])
+ }
+ }
+
+ // every reader will have the same number of trailing bytes, because
+ // we already confirmed they have the same length. Because
+ // offsets beyond the Word boundary can cause adjoining words, the
+ // tailing portion could be more than one word remaining full/partial
+ // words to write.
+ if bitLen == 0 {
+ return
+ }
+
+ // convert the word visitor to a bytevisitor
+ byteVisitor := func(in, out []byte) {
+ for i, w := range in {
+ visited[i] = uint64(w)
+ }
+ visitor(visited, output)
+ for i, w := range output {
+ out[i] = byte(w)
+ }
+ }
+
+ visitedBytes := make([]byte, len(rdrs))
+ outputBytes := make([]byte, len(wrs))
+ nbytes := rdrs[0].trailingBytes
+ for nbytes > 0 {
+ nbytes--
+ memory.Set(visitedBytes, 0)
+ memory.Set(outputBytes, 0)
+
+ var validBits int
+ for i := range rdrs {
+ visitedBytes[i], validBits = rdrs[i].NextTrailingByte()
+ }
+ byteVisitor(visitedBytes, outputBytes)
+ for i, w := range outputBytes {
+ wrs[i].PutNextTrailingByte(w, validBits)
+ }
+ }
+}
+
+// VisitWordsAndWrite visits words of bits from each input bitmap and
+// collects outputs to a slice of output Bitmaps.
+//
+// All bitmaps must have identical lengths. The first bit in a visited
+// bitmap may be offset within the first visited word, but words will
+// otherwise contain densely packed bits loaded from the bitmap. That
+// offset within the first word is returned.
+//
+// NOTE: this function is efficient on 3+ sufficiently large bitmaps.
+// It also has a large prolog/epilog overhead and should be used
+// carefully in other cases. For 2 or fewer bitmaps, and/or smaller
+// bitmaps, try BitmapReader and or other utilities.
+func VisitWordsAndWrite(args []Bitmap, out []Bitmap, visitor func(in, out []uint64)) error {
+ bitLen, err := bitLength(args)
+ if err != nil {
+ return err
+ }
+
+ rdrs, wrs := make([]*BitmapWordReader, len(args)), make([]*BitmapWordWriter, len(out))
+ for i, in := range args {
+ rdrs[i] = NewBitmapWordReader(in.Data, int(in.Offset), int(in.Len))
+ }
+ for i, o := range out {
+ wrs[i] = NewBitmapWordWriter(o.Data, int(o.Offset), int(o.Len))
+ }
+ runVisitWordsAndWriteLoop(bitLen, rdrs, wrs, visitor)
+ return nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go
new file mode 100644
index 000000000..a4a1519b8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go
@@ -0,0 +1,217 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutil
+
+import (
+ "math"
+ "math/bits"
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+var (
+ BitMask = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
+ FlippedBitMask = [8]byte{254, 253, 251, 247, 239, 223, 191, 127}
+)
+
+// IsMultipleOf8 returns whether v is a multiple of 8.
+func IsMultipleOf8(v int64) bool { return v&7 == 0 }
+
+// IsMultipleOf64 returns whether v is a multiple of 64.
+func IsMultipleOf64(v int64) bool { return v&63 == 0 }
+
+// BytesForBits returns the number of bytes required to hold the given
+// number of bits, rounding up.
+func BytesForBits(bits int64) int64 { return (bits + 7) >> 3 }
+
+// NextPowerOf2 rounds x to the next power of two.
+// Note: for an x that is already a power of two this returns the next
+// one (NextPowerOf2(8) == 16), and NextPowerOf2(0) == 1.
+func NextPowerOf2(x int) int { return 1 << uint(bits.Len(uint(x))) }
+
+// CeilByte rounds size to the next multiple of 8.
+func CeilByte(size int) int { return (size + 7) &^ 7 }
+
+// CeilByte64 rounds size to the next multiple of 8.
+func CeilByte64(size int64) int64 { return (size + 7) &^ 7 }
+
+// Bits are addressed LSB-first within each byte, matching the BitMask
+// table above (bit 0 of a byte is value 1).
+
+// BitIsSet returns true if the bit at index i in buf is set (1).
+func BitIsSet(buf []byte, i int) bool { return (buf[uint(i)/8] & BitMask[byte(i)%8]) != 0 }
+
+// BitIsNotSet returns true if the bit at index i in buf is not set (0).
+func BitIsNotSet(buf []byte, i int) bool { return (buf[uint(i)/8] & BitMask[byte(i)%8]) == 0 }
+
+// SetBit sets the bit at index i in buf to 1.
+func SetBit(buf []byte, i int) { buf[uint(i)/8] |= BitMask[byte(i)%8] }
+
+// ClearBit sets the bit at index i in buf to 0.
+func ClearBit(buf []byte, i int) { buf[uint(i)/8] &= FlippedBitMask[byte(i)%8] }
+
+// SetBitTo sets the bit at index i in buf to val.
+func SetBitTo(buf []byte, i int, val bool) {
+	if val {
+		SetBit(buf, i)
+	} else {
+		ClearBit(buf, i)
+	}
+}
+
+// CountSetBits counts the number of 1's in buf up to n bits starting at
+// bit offset. The aligned (offset == 0) path popcounts full 64-bit words,
+// then remaining whole bytes, then the final partial byte bit-by-bit.
+func CountSetBits(buf []byte, offset, n int) int {
+	if offset > 0 {
+		// unaligned starts need extra handling; done in a separate slow path.
+		return countSetBitsWithOffset(buf, offset, n)
+	}
+
+	count := 0
+
+	// bulk of the data: popcount whole 64-bit words.
+	uint64Bytes := n / uint64SizeBits * 8
+	for _, v := range bytesToUint64(buf[:uint64Bytes]) {
+		count += bits.OnesCount64(v)
+	}
+
+	// whole bytes that don't fill a complete word.
+	for _, v := range buf[uint64Bytes : n/8] {
+		count += bits.OnesCount8(v)
+	}
+
+	// tail bits
+	for i := n &^ 0x7; i < n; i++ {
+		if BitIsSet(buf, i) {
+			count++
+		}
+	}
+
+	return count
+}
+
+// countSetBitsWithOffset is the slow path of CountSetBits for a start
+// bit that is not byte aligned: it counts bit-by-bit up to the next
+// 64-bit boundary, popcounts whole words, then counts the tail bits.
+func countSetBitsWithOffset(buf []byte, offset, n int) int {
+	count := 0
+
+	beg := offset
+	end := offset + n
+
+	// first uint64-aligned bit position at or after beg.
+	begU8 := roundUp(beg, uint64SizeBits)
+
+	// leading unaligned bits (may consume all n if the range is short).
+	init := min(n, begU8-beg)
+	for i := offset; i < beg+init; i++ {
+		if BitIsSet(buf, i) {
+			count++
+		}
+	}
+
+	// whole 64-bit words between the leading and trailing partial runs.
+	nU64 := (n - init) / uint64SizeBits
+	begU64 := begU8 / uint64SizeBits
+	endU64 := begU64 + nU64
+	bufU64 := bytesToUint64(buf)
+	if begU64 < len(bufU64) {
+		for _, v := range bufU64[begU64:endU64] {
+			count += bits.OnesCount64(v)
+		}
+	}
+
+	// FIXME: use a fallback to bits.OnesCount8
+	// before counting the tail bits.
+
+	// trailing bits after the last whole word, counted individually.
+	tail := beg + init + nU64*uint64SizeBits
+	for i := tail; i < end; i++ {
+		if BitIsSet(buf, i) {
+			count++
+		}
+	}
+
+	return count
+}
+
+// roundUp returns v rounded up to the nearest multiple of f.
+func roundUp(v, f int) int {
+	return ((v + f - 1) / f) * f
+}
+
+// min returns the smaller of a and b.
+func min(a, b int) int {
+	if b < a {
+		return b
+	}
+	return a
+}
+
+// size of a uint64 in bytes and bits, used to partition bitmaps into words.
+const (
+	uint64SizeBytes = int(unsafe.Sizeof(uint64(0)))
+	uint64SizeBits  = uint64SizeBytes * 8
+)
+
+// bytesToUint64 reinterprets b as a []uint64 without copying, covering
+// as many complete 8-byte words as fit in len(b). It returns nil when
+// the backing array cannot hold even one word. Words are host-endian.
+func bytesToUint64(b []byte) []uint64 {
+	// the capacity (not length) check guards the unsafe.Slice construction
+	// over the backing array below.
+	if cap(b) < uint64SizeBytes {
+		return nil
+	}
+
+	// NOTE(review): reflect.SliceHeader is deprecated in modern Go; kept
+	// as-is here to avoid behavior changes in vendored code.
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	return unsafe.Slice((*uint64)(unsafe.Pointer(h.Data)), cap(b)/uint64SizeBytes)[:len(b)/uint64SizeBytes]
+}
+
+var (
+ // PrecedingBitmask is a convenience set of values as bitmasks for checking
+ // prefix bits of a byte
+ PrecedingBitmask = [8]byte{0, 1, 3, 7, 15, 31, 63, 127}
+	// TrailingBitmask is the bitwise complement of PrecedingBitmask
+ TrailingBitmask = [8]byte{255, 254, 252, 248, 240, 224, 192, 128}
+)
+
+// SetBitsTo is a convenience function to quickly set or unset all the bits
+// in a bitmap starting at startOffset for length bits.
+func SetBitsTo(bits []byte, startOffset, length int64, areSet bool) {
+	if length == 0 {
+		return
+	}
+
+	beg := startOffset
+	end := startOffset + length
+	// the fill byte is all ones when setting, all zeros when clearing.
+	var fill uint8 = 0
+	if areSet {
+		fill = math.MaxUint8
+	}
+
+	byteBeg := beg / 8
+	byteEnd := end/8 + 1
+
+	// don't modify bits before the startOffset by using this mask
+	firstByteMask := PrecedingBitmask[beg%8]
+	// don't modify bits past the length by using this mask
+	lastByteMask := TrailingBitmask[end%8]
+
+	if byteEnd == byteBeg+1 {
+		// set bits within a single byte
+		onlyByteMask := firstByteMask
+		if end%8 != 0 {
+			// the range also ends mid-byte, so protect the trailing bits too.
+			onlyByteMask = firstByteMask | lastByteMask
+		}
+
+		bits[byteBeg] &= onlyByteMask
+		bits[byteBeg] |= fill &^ onlyByteMask
+		return
+	}
+
+	// set/clear trailing bits of first byte
+	bits[byteBeg] &= firstByteMask
+	bits[byteBeg] |= fill &^ firstByteMask
+
+	// whole bytes in the middle can be filled wholesale.
+	if byteEnd-byteBeg > 2 {
+		memory.Set(bits[byteBeg+1:byteEnd-1], fill)
+	}
+
+	// if the range ends exactly on a byte boundary, nothing partial remains.
+	if end%8 == 0 {
+		return
+	}
+
+	bits[byteEnd-1] &= lastByteMask
+	bits[byteEnd-1] |= fill &^ lastByteMask
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go
new file mode 100644
index 000000000..9f5d3cdc7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !s390x
+
+package bitutil
+
+import (
+ "unsafe"
+)
+
+// toFromLEFunc converts between little-endian and host byte order; it
+// is the identity on the non-s390x targets this file builds for.
+var toFromLEFunc = func(in uint64) uint64 { return in }
+
+// getLSB returns the least significant byte of v (byte 0 on these targets).
+func getLSB(v uint64) byte {
+	return (*[8]byte)(unsafe.Pointer(&v))[0]
+}
+
+// setLSB overwrites the least significant byte of v in place.
+func setLSB(v *uint64, b byte) {
+	(*[8]byte)(unsafe.Pointer(v))[0] = b
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go
new file mode 100644
index 000000000..a9bba4391
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutil
+
+import (
+ "math/bits"
+ "unsafe"
+)
+
+// toFromLEFunc byte-swaps between little-endian and the big-endian host
+// order on s390x.
+var toFromLEFunc = bits.ReverseBytes64
+
+// getLSB returns the least significant byte of v (byte 7 on big-endian).
+func getLSB(v uint64) byte {
+	return (*[8]byte)(unsafe.Pointer(&v))[7]
+}
+
+// setLSB overwrites the least significant byte of v in place.
+func setLSB(v *uint64, b byte) {
+	(*[8]byte)(unsafe.Pointer(v))[7] = b
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/compare.go b/vendor/github.com/apache/arrow/go/v14/arrow/compare.go
new file mode 100644
index 000000000..58569b332
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/compare.go
@@ -0,0 +1,153 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "reflect"
+)
+
+// typeEqualsConfig holds the options applied during a TypeEqual check.
+type typeEqualsConfig struct {
+	// metadata indicates whether field metadata participates in equality.
+	metadata bool
+}
+
+// TypeEqualOption is a functional option type used for configuring type
+// equality checks.
+type TypeEqualOption func(*typeEqualsConfig)
+
+// CheckMetadata is an option for TypeEqual that allows checking for metadata
+// equality besides type equality. It only makes sense for types with metadata.
+func CheckMetadata() TypeEqualOption {
+	return func(cfg *typeEqualsConfig) {
+		cfg.metadata = true
+	}
+}
+
+// TypeEqual checks if two DataType are the same, optionally checking
+// metadata equality for types that carry child fields (lists, fixed size
+// lists, maps, structs and unions).
+func TypeEqual(left, right DataType, opts ...TypeEqualOption) bool {
+	var cfg typeEqualsConfig
+	for _, opt := range opts {
+		opt(&cfg)
+	}
+
+	switch {
+	case left == nil || right == nil:
+		// equal only when both are nil.
+		return left == nil && right == nil
+	case left.ID() != right.ID():
+		return false
+	}
+
+	// the IDs match from here on, so asserting right to l's type is safe.
+	switch l := left.(type) {
+	case ExtensionType:
+		// extension types implement their own equality check.
+		return l.ExtensionEquals(right.(ExtensionType))
+	case *ListType:
+		if !TypeEqual(l.Elem(), right.(*ListType).Elem(), opts...) {
+			return false
+		}
+		if cfg.metadata && !l.elem.Metadata.Equal(right.(*ListType).elem.Metadata) {
+			return false
+		}
+		return l.elem.Nullable == right.(*ListType).elem.Nullable
+	case *FixedSizeListType:
+		if !TypeEqual(l.Elem(), right.(*FixedSizeListType).Elem(), opts...) {
+			return false
+		}
+		if cfg.metadata && !l.elem.Metadata.Equal(right.(*FixedSizeListType).elem.Metadata) {
+			return false
+		}
+		// must also agree on the fixed list size n.
+		return l.n == right.(*FixedSizeListType).n && l.elem.Nullable == right.(*FixedSizeListType).elem.Nullable
+	case *MapType:
+		if !TypeEqual(l.KeyType(), right.(*MapType).KeyType(), opts...) {
+			return false
+		}
+		if !TypeEqual(l.ItemType(), right.(*MapType).ItemType(), opts...) {
+			return false
+		}
+		if l.KeyField().Nullable != right.(*MapType).KeyField().Nullable {
+			return false
+		}
+		if l.ItemField().Nullable != right.(*MapType).ItemField().Nullable {
+			return false
+		}
+		if cfg.metadata {
+			if !l.KeyField().Metadata.Equal(right.(*MapType).KeyField().Metadata) {
+				return false
+			}
+			if !l.ItemField().Metadata.Equal(right.(*MapType).ItemField().Metadata) {
+				return false
+			}
+		}
+		return true
+	case *StructType:
+		r := right.(*StructType)
+		switch {
+		case len(l.fields) != len(r.fields):
+			return false
+		case !reflect.DeepEqual(l.index, r.index):
+			// the name->slot index must match as well as the field list.
+			return false
+		}
+		for i := range l.fields {
+			leftField, rightField := l.fields[i], r.fields[i]
+			switch {
+			case leftField.Name != rightField.Name:
+				return false
+			case leftField.Nullable != rightField.Nullable:
+				return false
+			case !TypeEqual(leftField.Type, rightField.Type, opts...):
+				return false
+			case cfg.metadata && !leftField.Metadata.Equal(rightField.Metadata):
+				return false
+			}
+		}
+		return true
+	case UnionType:
+		r := right.(UnionType)
+		if l.Mode() != r.Mode() {
+			return false
+		}
+
+		if !reflect.DeepEqual(l.ChildIDs(), r.ChildIDs()) {
+			return false
+		}
+
+		// fields must match pairwise, including their union type codes.
+		for i := range l.Fields() {
+			leftField, rightField := l.Fields()[i], r.Fields()[i]
+			switch {
+			case leftField.Name != rightField.Name:
+				return false
+			case leftField.Nullable != rightField.Nullable:
+				return false
+			case !TypeEqual(leftField.Type, rightField.Type, opts...):
+				return false
+			case cfg.metadata && !leftField.Metadata.Equal(rightField.Metadata):
+				return false
+			case l.TypeCodes()[i] != r.TypeCodes()[i]:
+				return false
+			}
+		}
+		return true
+	case *TimestampType:
+		r := right.(*TimestampType)
+		return l.Unit == r.Unit && l.TimeZone == r.TimeZone
+	case *RunEndEncodedType:
+		r := right.(*RunEndEncodedType)
+		return TypeEqual(l.Encoded(), r.Encoded(), opts...) &&
+			TypeEqual(l.runEnds, r.runEnds, opts...)
+	default:
+		// remaining types (primitives and other parameter-free types) are
+		// compared structurally.
+		return reflect.DeepEqual(left, right)
+	}
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go
new file mode 100644
index 000000000..f0fb24ec8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go
@@ -0,0 +1,404 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "fmt"
+ "hash/maphash"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+// Type is a logical type. They can be expressed as
+// either a primitive physical type (bytes or bits of some fixed size), a
+// nested type consisting of other data types, or another data type (e.g. a
+// timestamp encoded as an int64)
+type Type int
+
+const (
+ // NULL type having no physical storage
+ NULL Type = iota
+
+ // BOOL is a 1 bit, LSB bit-packed ordering
+ BOOL
+
+ // UINT8 is an Unsigned 8-bit little-endian integer
+ UINT8
+
+ // INT8 is a Signed 8-bit little-endian integer
+ INT8
+
+ // UINT16 is an Unsigned 16-bit little-endian integer
+ UINT16
+
+ // INT16 is a Signed 16-bit little-endian integer
+ INT16
+
+ // UINT32 is an Unsigned 32-bit little-endian integer
+ UINT32
+
+ // INT32 is a Signed 32-bit little-endian integer
+ INT32
+
+ // UINT64 is an Unsigned 64-bit little-endian integer
+ UINT64
+
+ // INT64 is a Signed 64-bit little-endian integer
+ INT64
+
+ // FLOAT16 is a 2-byte floating point value
+ FLOAT16
+
+ // FLOAT32 is a 4-byte floating point value
+ FLOAT32
+
+ // FLOAT64 is an 8-byte floating point value
+ FLOAT64
+
+ // STRING is a UTF8 variable-length string
+ STRING
+
+ // BINARY is a Variable-length byte type (no guarantee of UTF8-ness)
+ BINARY
+
+ // FIXED_SIZE_BINARY is a binary where each value occupies the same number of bytes
+ FIXED_SIZE_BINARY
+
+ // DATE32 is int32 days since the UNIX epoch
+ DATE32
+
+ // DATE64 is int64 milliseconds since the UNIX epoch
+ DATE64
+
+ // TIMESTAMP is an exact timestamp encoded with int64 since UNIX epoch
+ // Default unit millisecond
+ TIMESTAMP
+
+ // TIME32 is a signed 32-bit integer, representing either seconds or
+ // milliseconds since midnight
+ TIME32
+
+ // TIME64 is a signed 64-bit integer, representing either microseconds or
+ // nanoseconds since midnight
+ TIME64
+
+ // INTERVAL_MONTHS is YEAR_MONTH interval in SQL style
+ INTERVAL_MONTHS
+
+ // INTERVAL_DAY_TIME is DAY_TIME in SQL Style
+ INTERVAL_DAY_TIME
+
+ // DECIMAL128 is a precision- and scale-based decimal type. Storage type depends on the
+ // parameters.
+ DECIMAL128
+
+ // DECIMAL256 is a precision and scale based decimal type, with 256 bit max. not yet implemented
+ DECIMAL256
+
+ // LIST is a list of some logical data type
+ LIST
+
+ // STRUCT of logical types
+ STRUCT
+
+ // SPARSE_UNION of logical types. not yet implemented
+ SPARSE_UNION
+
+ // DENSE_UNION of logical types. not yet implemented
+ DENSE_UNION
+
+ // DICTIONARY aka Category type
+ DICTIONARY
+
+ // MAP is a repeated struct logical type
+ MAP
+
+ // Custom data type, implemented by user
+ EXTENSION
+
+ // Fixed size list of some logical type
+ FIXED_SIZE_LIST
+
+ // Measure of elapsed time in either seconds, milliseconds, microseconds
+ // or nanoseconds.
+ DURATION
+
+ // like STRING, but 64-bit offsets. not yet implemented
+ LARGE_STRING
+
+ // like BINARY but with 64-bit offsets, not yet implemented
+ LARGE_BINARY
+
+	// like LIST but with 64-bit offsets. not yet implemented
+ LARGE_LIST
+
+ // calendar interval with three fields
+ INTERVAL_MONTH_DAY_NANO
+
+ RUN_END_ENCODED
+
+ // String (UTF8) view type with 4-byte prefix and inline
+ // small string optimizations
+ STRING_VIEW
+
+ // Bytes view with 4-byte prefix and inline small byte arrays optimization
+ BINARY_VIEW
+
+ // LIST_VIEW is a list of some logical data type represented with offsets and sizes
+ LIST_VIEW
+
+	// like LIST_VIEW but with 64-bit offsets
+ LARGE_LIST_VIEW
+
+ // Alias to ensure we do not break any consumers
+ DECIMAL = DECIMAL128
+)
+
+// DataType is the representation of an Arrow type.
+type DataType interface {
+ fmt.Stringer
+ ID() Type
+ // Name is name of the data type.
+ Name() string
+ Fingerprint() string
+ Layout() DataTypeLayout
+}
+
+// TypesToString is a convenience function to create a list of types
+// which are comma delimited as a string
+func TypesToString(types []DataType) string {
+ var b strings.Builder
+ b.WriteByte('(')
+ for i, t := range types {
+ if i != 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(t.String())
+ }
+ b.WriteByte(')')
+ return b.String()
+}
+
+// FixedWidthDataType is the representation of an Arrow type that
+// requires a fixed number of bits in memory for each element.
+type FixedWidthDataType interface {
+ DataType
+ // BitWidth returns the number of bits required to store a single element of this data type in memory.
+ BitWidth() int
+ // Bytes returns the number of bytes required to store a single element of this data type in memory.
+ Bytes() int
+}
+
+type BinaryDataType interface {
+ DataType
+ IsUtf8() bool
+ binary()
+}
+
+type OffsetsDataType interface {
+ DataType
+ OffsetTypeTraits() OffsetTraits
+}
+
+// HashType computes a hash of the data type's Fingerprint using the
+// given maphash seed, suitable for keying maps of data types.
+func HashType(seed maphash.Seed, dt DataType) uint64 {
+	var h maphash.Hash
+	h.SetSeed(seed)
+	h.WriteString(dt.Fingerprint())
+	return h.Sum64()
+}
+
+// typeIDFingerprint maps a Type ID to a short fingerprint token: '@'
+// followed by a single rune starting at 'A' for Type(0).
+func typeIDFingerprint(id Type) string {
+	c := string(rune(int(id) + int('A')))
+	return "@" + c
+}
+
+// typeFingerprint returns the fingerprint token derived from a type's ID alone.
+func typeFingerprint(typ DataType) string { return typeIDFingerprint(typ.ID()) }
+
+// timeUnitFingerprint returns the single-rune fingerprint code for a
+// time unit ('s', 'm', 'u' or 'n'); rune(0) for an unknown unit, which
+// trips a debug assertion.
+func timeUnitFingerprint(unit TimeUnit) rune {
+	switch unit {
+	case Second:
+		return 's'
+	case Millisecond:
+		return 'm'
+	case Microsecond:
+		return 'u'
+	case Nanosecond:
+		return 'n'
+	default:
+		debug.Assert(false, "unexpected time unit")
+		return rune(0)
+	}
+}
+
+// BufferKind describes the type of buffer expected when defining a layout specification
+type BufferKind int8
+
+// The expected types of buffers
+const (
+ KindFixedWidth BufferKind = iota
+ KindVarWidth
+ KindBitmap
+ KindAlwaysNull
+)
+
+// BufferSpec provides a specification for the buffers of a particular datatype
+type BufferSpec struct {
+ Kind BufferKind
+ ByteWidth int // for KindFixedWidth
+}
+
+// Equals reports whether two buffer specs are equivalent; the byte width
+// is only significant for fixed-width buffers.
+func (b BufferSpec) Equals(other BufferSpec) bool {
+	if b.Kind != other.Kind {
+		return false
+	}
+	return b.Kind != KindFixedWidth || b.ByteWidth == other.ByteWidth
+}
+
+// DataTypeLayout represents the physical layout of a datatype's buffers including
+// the number of and types of those binary buffers. This will correspond
+// with the buffers in the ArrayData for an array of that type.
+type DataTypeLayout struct {
+ Buffers []BufferSpec
+ HasDict bool
+}
+
+// Spec* helpers construct the BufferSpec values used in DataTypeLayouts:
+// fixed width (w bytes per value), variable width, validity bitmap, and
+// always-null (no physical storage).
+func SpecFixedWidth(w int) BufferSpec { return BufferSpec{KindFixedWidth, w} }
+func SpecVariableWidth() BufferSpec   { return BufferSpec{KindVarWidth, -1} }
+func SpecBitmap() BufferSpec          { return BufferSpec{KindBitmap, -1} }
+func SpecAlwaysNull() BufferSpec      { return BufferSpec{KindAlwaysNull, -1} }
+
+// IsInteger is a helper to return true if the type ID provided is one of the
+// integral types of uint or int with the varying sizes.
+func IsInteger(t Type) bool {
+	// the integral types are exactly the signed set plus the unsigned set.
+	return IsSignedInteger(t) || IsUnsignedInteger(t)
+}
+
+// IsUnsignedInteger is a helper that returns true if the type ID provided is
+// one of the uint integral types (uint8, uint16, uint32, uint64)
+func IsUnsignedInteger(t Type) bool {
+	switch t {
+	case UINT8, UINT16, UINT32, UINT64:
+		return true
+	}
+	return false
+}
+
+// IsSignedInteger is a helper that returns true if the type ID provided is
+// one of the int integral types (int8, int16, int32, int64)
+func IsSignedInteger(t Type) bool {
+	switch t {
+	case INT8, INT16, INT32, INT64:
+		return true
+	}
+	return false
+}
+
+// IsFloating is a helper that returns true if the type ID provided is
+// one of Float16, Float32, or Float64
+func IsFloating(t Type) bool {
+	switch t {
+	case FLOAT16, FLOAT32, FLOAT64:
+		return true
+	}
+	return false
+}
+
+// IsPrimitive returns true if the provided type ID represents a fixed width
+// primitive type. Note that FIXED_SIZE_BINARY, the decimal types and
+// DICTIONARY are not included here even though they are fixed width.
+func IsPrimitive(t Type) bool {
+	switch t {
+	case BOOL, UINT8, INT8, UINT16, INT16, UINT32, INT32, UINT64, INT64,
+		FLOAT16, FLOAT32, FLOAT64, DATE32, DATE64, TIME32, TIME64, TIMESTAMP,
+		DURATION, INTERVAL_MONTHS, INTERVAL_DAY_TIME, INTERVAL_MONTH_DAY_NANO:
+		return true
+	}
+	return false
+}
+
+// IsBaseBinary returns true for Binary/String and their LARGE variants
+func IsBaseBinary(t Type) bool {
+	return IsBinaryLike(t) || IsLargeBinaryLike(t)
+}
+
+// IsBinaryLike returns true for only BINARY and STRING
+func IsBinaryLike(t Type) bool {
+	switch t {
+	case BINARY, STRING:
+		return true
+	}
+	return false
+}
+
+// IsLargeBinaryLike returns true for only LARGE_BINARY and LARGE_STRING
+func IsLargeBinaryLike(t Type) bool {
+	switch t {
+	case LARGE_BINARY, LARGE_STRING:
+		return true
+	}
+	return false
+}
+
+// IsFixedSizeBinary returns true for Decimal128/256 and FixedSizeBinary
+func IsFixedSizeBinary(t Type) bool {
+	switch t {
+	case DECIMAL128, DECIMAL256, FIXED_SIZE_BINARY:
+		return true
+	}
+	return false
+}
+
+// IsDecimal returns true for Decimal128 and Decimal256
+func IsDecimal(t Type) bool {
+	switch t {
+	case DECIMAL128, DECIMAL256:
+		return true
+	}
+	return false
+}
+
+// IsUnion returns true for Sparse and Dense Unions
+func IsUnion(t Type) bool {
+	switch t {
+	case DENSE_UNION, SPARSE_UNION:
+		return true
+	}
+	return false
+}
+
+// IsListLike returns true for List, LargeList, FixedSizeList, and Map.
+// The list-view types are not included here.
+func IsListLike(t Type) bool {
+	switch t {
+	case LIST, LARGE_LIST, FIXED_SIZE_LIST, MAP:
+		return true
+	}
+	return false
+}
+
+// IsNested returns true for List, LargeList, FixedSizeList, Map, Struct,
+// the Unions, and the list-view types.
+func IsNested(t Type) bool {
+	switch t {
+	case LIST, LARGE_LIST, FIXED_SIZE_LIST, MAP, LIST_VIEW, LARGE_LIST_VIEW, STRUCT, SPARSE_UNION, DENSE_UNION:
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go
new file mode 100644
index 000000000..a3a856864
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go
@@ -0,0 +1,98 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+// OffsetTraits is a convenient interface over the various type traits
+// constants such as arrow.Int32Traits allowing types with offsets, like
+// BinaryType, StringType, LargeBinaryType and LargeStringType to have
+// a method to return information about their offset type and how many bytes
+// would be required to allocate an offset buffer for them.
+type OffsetTraits interface {
+ // BytesRequired returns the number of bytes required to be allocated
+ // in order to hold the passed in number of elements of this type.
+ BytesRequired(int) int
+}
+
+// BinaryType describes variable-length byte data (no guarantee of
+// UTF8-ness) addressed via 32-bit offsets.
+type BinaryType struct{}
+
+func (t *BinaryType) ID() Type            { return BINARY }
+func (t *BinaryType) Name() string        { return "binary" }
+func (t *BinaryType) String() string      { return "binary" }
+func (t *BinaryType) binary()             {}
+func (t *BinaryType) Fingerprint() string { return typeFingerprint(t) }
+
+// Layout: validity bitmap, int32 offsets, variable-width data buffer.
+func (t *BinaryType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(),
+		SpecFixedWidth(Int32SizeBytes), SpecVariableWidth()}}
+}
+func (t *BinaryType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+func (BinaryType) IsUtf8() bool                      { return false }
+
+// StringType describes variable-length UTF8 string data addressed via
+// 32-bit offsets.
+type StringType struct{}
+
+func (t *StringType) ID() Type            { return STRING }
+func (t *StringType) Name() string        { return "utf8" }
+func (t *StringType) String() string      { return "utf8" }
+func (t *StringType) binary()             {}
+func (t *StringType) Fingerprint() string { return typeFingerprint(t) }
+
+// Layout: validity bitmap, int32 offsets, variable-width data buffer.
+func (t *StringType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(),
+		SpecFixedWidth(Int32SizeBytes), SpecVariableWidth()}}
+}
+func (t *StringType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+func (StringType) IsUtf8() bool                      { return true }
+
+// LargeBinaryType is like BinaryType but uses 64-bit offsets, allowing
+// more than 2GiB of data per array.
+type LargeBinaryType struct{}
+
+func (t *LargeBinaryType) ID() Type            { return LARGE_BINARY }
+func (t *LargeBinaryType) Name() string        { return "large_binary" }
+func (t *LargeBinaryType) String() string      { return "large_binary" }
+func (t *LargeBinaryType) binary()             {}
+func (t *LargeBinaryType) Fingerprint() string { return typeFingerprint(t) }
+
+// Layout: validity bitmap, int64 offsets, variable-width data buffer.
+func (t *LargeBinaryType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(),
+		SpecFixedWidth(Int64SizeBytes), SpecVariableWidth()}}
+}
+func (t *LargeBinaryType) OffsetTypeTraits() OffsetTraits { return Int64Traits }
+func (LargeBinaryType) IsUtf8() bool                      { return false }
+
+// LargeStringType is like StringType but uses 64-bit offsets.
+type LargeStringType struct{}
+
+func (t *LargeStringType) ID() Type            { return LARGE_STRING }
+func (t *LargeStringType) Name() string        { return "large_utf8" }
+func (t *LargeStringType) String() string      { return "large_utf8" }
+func (t *LargeStringType) binary()             {}
+func (t *LargeStringType) Fingerprint() string { return typeFingerprint(t) }
+
+// Layout: validity bitmap, int64 offsets, variable-width data buffer.
+func (t *LargeStringType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(),
+		SpecFixedWidth(Int64SizeBytes), SpecVariableWidth()}}
+}
+func (t *LargeStringType) OffsetTypeTraits() OffsetTraits { return Int64Traits }
+func (LargeStringType) IsUtf8() bool                      { return true }
+
+var (
+	// BinaryTypes provides shared singleton instances of the four
+	// variable-length binary data types.
+	BinaryTypes = struct {
+		Binary      BinaryDataType
+		String      BinaryDataType
+		LargeBinary BinaryDataType
+		LargeString BinaryDataType
+	}{
+		Binary:      &BinaryType{},
+		String:      &StringType{},
+		LargeBinary: &LargeBinaryType{},
+		LargeString: &LargeStringType{},
+	}
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go
new file mode 100644
index 000000000..c1750a889
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+// EncodedType is a DataType that wraps another "encoded" value type,
+// such as run-end encoding.
+type EncodedType interface {
+	DataType
+	// Encoded returns the data type of the values being encoded.
+	Encoded() DataType
+}
+
+// RunEndEncodedType is the datatype to represent a run-end encoded
+// array of data. ValueNullable defaults to true, but can be set false
+// if this should represent a type with a non-nullable value field.
+type RunEndEncodedType struct {
+	runEnds       DataType
+	values        DataType
+	ValueNullable bool
+}
+
+// RunEndEncodedOf constructs a RunEndEncodedType with nullable values.
+func RunEndEncodedOf(runEnds, values DataType) *RunEndEncodedType {
+	return &RunEndEncodedType{runEnds: runEnds, values: values, ValueNullable: true}
+}
+
+func (*RunEndEncodedType) ID() Type     { return RUN_END_ENCODED }
+func (*RunEndEncodedType) Name() string { return "run_end_encoded" }
+
+// Layout reports no physical buffers: the data lives in the child arrays.
+func (*RunEndEncodedType) Layout() DataTypeLayout {
+	return DataTypeLayout{Buffers: []BufferSpec{SpecAlwaysNull()}}
+}
+
+func (t *RunEndEncodedType) String() string {
+	return t.Name() + "<run_ends: " + t.runEnds.String() + ", values: " + t.values.String() + ">"
+}
+
+func (t *RunEndEncodedType) Fingerprint() string {
+	return typeFingerprint(t) + "{" + t.runEnds.Fingerprint() + ";" + t.values.Fingerprint() + ";}"
+}
+
+// RunEnds returns the run-ends type; Encoded satisfies EncodedType.
+func (t *RunEndEncodedType) RunEnds() DataType { return t.runEnds }
+func (t *RunEndEncodedType) Encoded() DataType { return t.values }
+
+// Fields returns the child fields: run_ends plus the (optionally
+// nullable) values.
+func (t *RunEndEncodedType) Fields() []Field {
+	return []Field{
+		{Name: "run_ends", Type: t.runEnds},
+		{Name: "values", Type: t.values, Nullable: t.ValueNullable},
+	}
+}
+
+// ValidRunEndsType reports whether dt may be used as the run-ends type;
+// only int16, int32 and int64 are accepted.
+func (*RunEndEncodedType) ValidRunEndsType(dt DataType) bool {
+	switch dt.ID() {
+	case INT16, INT32, INT64:
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go
new file mode 100644
index 000000000..271c8b0db
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go
@@ -0,0 +1,173 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+var (
+ // global extension type registry, initially left nil to avoid paying
+ // the cost if no extension types are used.
+ // the choice to use a sync.Map here is because it's expected that most
+ // use cases would be to register some number of types at initialization
+ // or otherwise and leave them rather than a pattern of repeatedly registering
+ // and unregistering types. As per the documentation for sync.Map
+ // (https://pkg.go.dev/sync#Map), it is specialized for the case where an entry
+ // is written once but read many times which fits our case here as we register
+ // a type once and then have to read it many times when deserializing messages
+ // with that type.
+ extTypeRegistry *sync.Map
+ // used for initializing the registry once and only once
+ initReg sync.Once
+)
+
+// convenience function to ensure that the type registry is initialized once
+// and only once in a goroutine-safe manner.
+func getExtTypeRegistry() *sync.Map {
+ initReg.Do(func() { extTypeRegistry = &sync.Map{} })
+ return extTypeRegistry
+}
+
+// RegisterExtensionType registers the provided ExtensionType by calling ExtensionName
+// to use as a key for registering the type. If a type with the same name is already
+// registered then this will return an error saying so, otherwise it will return nil
+// after successfully registering the type.
+// This function is safe to call from multiple goroutines simultaneously.
+func RegisterExtensionType(typ ExtensionType) error {
+ name := typ.ExtensionName()
+ registry := getExtTypeRegistry()
+ if _, existed := registry.LoadOrStore(name, typ); existed {
+ return fmt.Errorf("arrow: type extension with name %s already defined", name)
+ }
+ return nil
+}
+
+// UnregisterExtensionType removes the type with the given name from the registry
+// causing any messages with that type which come in to be expressed with their
+// metadata and underlying type instead of the extension type that isn't known.
+// This function is safe to call from multiple goroutines simultaneously.
+func UnregisterExtensionType(typName string) error {
+ registry := getExtTypeRegistry()
+ if _, loaded := registry.LoadAndDelete(typName); !loaded {
+ return fmt.Errorf("arrow: no type extension with name %s found", typName)
+ }
+ return nil
+}
+
+// GetExtensionType retrieves and returns the extension type of the given name
+// from the global extension type registry. If the type isn't found it will return
+// nil. This function is safe to call from multiple goroutines concurrently.
+func GetExtensionType(typName string) ExtensionType {
+ registry := getExtTypeRegistry()
+ if val, ok := registry.Load(typName); ok {
+ return val.(ExtensionType)
+ }
+ return nil
+}
+
+// ExtensionType is an interface for handling user-defined types. They must be
+// DataTypes and must embed arrow.ExtensionBase in them in order to work properly
+// ensuring that they always have the expected base behavior.
+//
+// The arrow.ExtensionBase that needs to be embedded implements the DataType interface
+// leaving the remaining functions having to be implemented by the actual user-defined
+// type in order to be handled properly.
+type ExtensionType interface {
+ DataType
+ // ArrayType should return the reflect.TypeOf(ExtensionArrayType{}) where the
+ // ExtensionArrayType is a type that implements the array.ExtensionArray interface.
+ // Such a type must also embed the array.ExtensionArrayBase in it. This will be used
+ // when creating arrays of this ExtensionType by using reflect.New
+ ArrayType() reflect.Type
+ // ExtensionName is what will be used when registering / unregistering this extension
+ // type. Multiple user-defined types can be defined with a parameterized ExtensionType
+ // as long as the parameter is used in the ExtensionName to distinguish the instances
+ // in the global Extension Type registry.
+ // The return from this is also what will be placed in the metadata for IPC communication
+ // under the key ARROW:extension:name
+ ExtensionName() string
+ // StorageType returns the underlying storage type which is used by this extension
+ // type. It is already implemented by the ExtensionBase struct and thus does not need
+ // to be re-implemented by a user-defined type.
+ StorageType() DataType
+ // ExtensionEquals is used to tell whether two ExtensionType instances are equal types.
+ ExtensionEquals(ExtensionType) bool
+ // Serialize should produce any extra metadata necessary for initializing an instance of
+ // this user-defined type. Not all user-defined types require this and it is valid to return
+ // nil from this function or an empty slice. This is used for the IPC format and will be
+ // added to metadata for IPC communication under the key ARROW:extension:metadata
+ // This should be implemented such that it is valid to be called by multiple goroutines
+ // concurrently.
+ Serialize() string
+ // Deserialize is called when reading in extension arrays and types via the IPC format
+ // in order to construct an instance of the appropriate extension type. The data passed in
+ // is pulled from the ARROW:extension:metadata key and may be nil or an empty slice.
+ // If the storage type is incorrect or something else is invalid with the data this should
+ // return nil and an appropriate error.
+ Deserialize(storageType DataType, data string) (ExtensionType, error)
+
+ mustEmbedExtensionBase()
+}
+
+// ExtensionBase is the base struct for user-defined Extension Types which must be
+// embedded in any user-defined types like so:
+//
+// type UserDefinedType struct {
+// arrow.ExtensionBase
+// // any other data
+// }
+type ExtensionBase struct {
+ // Storage is the underlying storage type
+ Storage DataType
+}
+
+// ID always returns arrow.EXTENSION and should not be overridden
+func (*ExtensionBase) ID() Type { return EXTENSION }
+
+// Name should always return "extension" and should not be overridden
+func (*ExtensionBase) Name() string { return "extension" }
+
+// String by default will return "extension_type<storage=storage_type>" but can be overridden
+// to customize what is printed out when printing this extension type.
+func (e *ExtensionBase) String() string { return fmt.Sprintf("extension_type<storage=%s>", e.Storage) }
+
+// StorageType returns the underlying storage type and exists so that functions
+// written against the ExtensionType interface can access the storage type.
+func (e *ExtensionBase) StorageType() DataType { return e.Storage }
+
+func (e *ExtensionBase) Fingerprint() string { return typeFingerprint(e) + e.Storage.Fingerprint() }
+
+func (e *ExtensionBase) Fields() []Field {
+ if nested, ok := e.Storage.(NestedType); ok {
+ return nested.Fields()
+ }
+ return nil
+}
+
+func (e *ExtensionBase) Layout() DataTypeLayout { return e.Storage.Layout() }
+
+// this no-op exists to ensure that this type must be embedded in any user-defined extension type.
+//
+//lint:ignore U1000 this function is intentionally unused as it only exists to ensure embedding happens
+func (ExtensionBase) mustEmbedExtensionBase() {}
+
+var (
+ _ DataType = (*ExtensionBase)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go
new file mode 100644
index 000000000..fc0b3aea5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go
@@ -0,0 +1,819 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/apache/arrow/go/v14/internal/json"
+
+ "golang.org/x/xerrors"
+)
+
+type BooleanType struct{}
+
+func (t *BooleanType) ID() Type { return BOOL }
+func (t *BooleanType) Name() string { return "bool" }
+func (t *BooleanType) String() string { return "bool" }
+func (t *BooleanType) Fingerprint() string { return typeFingerprint(t) }
+func (BooleanType) Bytes() int { return 1 } // smallest addressable size, though each value occupies only 1 bit
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *BooleanType) BitWidth() int { return 1 }
+
+func (BooleanType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecBitmap()}} // validity bitmap, then value bitmap
+}
+
+type FixedSizeBinaryType struct {
+ ByteWidth int
+}
+
+func (*FixedSizeBinaryType) ID() Type { return FIXED_SIZE_BINARY }
+func (*FixedSizeBinaryType) Name() string { return "fixed_size_binary" }
+func (t *FixedSizeBinaryType) BitWidth() int { return 8 * t.ByteWidth }
+func (t *FixedSizeBinaryType) Bytes() int { return t.ByteWidth }
+func (t *FixedSizeBinaryType) Fingerprint() string { return typeFingerprint(t) }
+func (t *FixedSizeBinaryType) String() string {
+ return "fixed_size_binary[" + strconv.Itoa(t.ByteWidth) + "]"
+}
+func (t *FixedSizeBinaryType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(t.ByteWidth)}}
+}
+
+type (
+ Timestamp int64
+ Time32 int32
+ Time64 int64
+ TimeUnit int
+ Date32 int32
+ Date64 int64
+ Duration int64
+)
+
+// Date32FromTime returns a Date32 value from a time object
+func Date32FromTime(t time.Time) Date32 {
+ if _, offset := t.Zone(); offset != 0 {
+ // properly account for timezone adjustments before we calculate
+ // the number of days by adjusting the time and converting to UTC
+ t = t.Add(time.Duration(offset) * time.Second).UTC()
+ }
+ return Date32(t.Truncate(24*time.Hour).Unix() / int64((time.Hour * 24).Seconds()))
+}
+
+func (d Date32) ToTime() time.Time {
+ return time.Unix(0, 0).UTC().AddDate(0, 0, int(d))
+}
+
+func (d Date32) FormattedString() string {
+ return d.ToTime().Format("2006-01-02")
+}
+
+// Date64FromTime returns a Date64 value from a time object
+func Date64FromTime(t time.Time) Date64 {
+ if _, offset := t.Zone(); offset != 0 {
+ // properly account for timezone adjustments before we calculate
+ // the actual value by adjusting the time and converting to UTC
+ t = t.Add(time.Duration(offset) * time.Second).UTC()
+ }
+ // truncate to the start of the day to get the correct value
+ t = t.Truncate(24 * time.Hour)
+ return Date64(t.Unix()*1e3 + int64(t.Nanosecond())/1e6)
+}
+
+func (d Date64) ToTime() time.Time {
+ days := int(int64(d) / (time.Hour * 24).Milliseconds())
+ return time.Unix(0, 0).UTC().AddDate(0, 0, days)
+}
+
+func (d Date64) FormattedString() string {
+ return d.ToTime().Format("2006-01-02")
+}
+
+// TimestampFromStringInLocation is like TimestampFromString, but treats the time instant
+// as if it were in the provided timezone before converting to UTC for internal representation.
+func TimestampFromStringInLocation(val string, unit TimeUnit, loc *time.Location) (Timestamp, bool, error) {
+ if len(val) < 10 {
+ return 0, false, fmt.Errorf("%w: invalid timestamp string", ErrInvalid)
+ }
+
+ var (
+ format = "2006-01-02"
+ zoneFmt string
+ lenWithoutZone = len(val)
+ )
+
+ if lenWithoutZone > 10 {
+ switch {
+ case val[len(val)-1] == 'Z':
+ zoneFmt = "Z"
+ lenWithoutZone--
+ case val[len(val)-3] == '+' || val[len(val)-3] == '-':
+ zoneFmt = "-07"
+ lenWithoutZone -= 3
+ case val[len(val)-5] == '+' || val[len(val)-5] == '-':
+ zoneFmt = "-0700"
+ lenWithoutZone -= 5
+ case val[len(val)-6] == '+' || val[len(val)-6] == '-':
+ zoneFmt = "-07:00"
+ lenWithoutZone -= 6
+ }
+ }
+
+ switch {
+ case lenWithoutZone == 13:
+ format += string(val[10]) + "15"
+ case lenWithoutZone == 16:
+ format += string(val[10]) + "15:04"
+ case lenWithoutZone >= 19:
+ format += string(val[10]) + "15:04:05.999999999"
+ }
+
+ // error if we're truncating precision
+ // don't need a case for nano as time.Parse will already error if
+ // more than nanosecond precision is provided
+ switch {
+ case unit == Second && lenWithoutZone > 19:
+ return 0, zoneFmt != "", xerrors.New("provided more than second precision for timestamp[s]")
+ case unit == Millisecond && lenWithoutZone > 23:
+ return 0, zoneFmt != "", xerrors.New("provided more than millisecond precision for timestamp[ms]")
+ case unit == Microsecond && lenWithoutZone > 26:
+ return 0, zoneFmt != "", xerrors.New("provided more than microsecond precision for timestamp[us]")
+ }
+
+ format += zoneFmt
+ out, err := time.ParseInLocation(format, val, loc)
+ if err != nil {
+ return 0, zoneFmt != "", fmt.Errorf("%w: %s", ErrInvalid, err)
+ }
+ if loc != time.UTC {
+ // ParseInLocation already interpreted a zone-less string in loc;
+ // this normalizes the (unchanged) instant to UTC for internal storage
+ out = out.In(loc).UTC()
+ }
+
+ ts, err := TimestampFromTime(out, unit)
+ return ts, zoneFmt != "", err
+}
+
+// TimestampFromString parses a string and returns a timestamp for the given unit
+// level.
+//
+// The timestamp should be in one of the following forms, [T] can be either T
+// or a space, and [.zzzzzzzzz] can be either left out or up to 9 digits of
+// fractions of a second.
+//
+// YYYY-MM-DD
+// YYYY-MM-DD[T]HH
+// YYYY-MM-DD[T]HH:MM
+// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzzz]
+//
+// You can also optionally have an ending Z to indicate UTC or indicate a specific
+// timezone using ±HH, ±HHMM or ±HH:MM at the end of the string.
+func TimestampFromString(val string, unit TimeUnit) (Timestamp, error) {
+ tm, _, err := TimestampFromStringInLocation(val, unit, time.UTC)
+ return tm, err
+}
+
+func (t Timestamp) ToTime(unit TimeUnit) time.Time {
+ switch unit {
+ case Second:
+ return time.Unix(int64(t), 0).UTC()
+ case Millisecond:
+ return time.UnixMilli(int64(t)).UTC()
+ case Microsecond:
+ return time.UnixMicro(int64(t)).UTC()
+ default:
+ return time.Unix(0, int64(t)).UTC()
+ }
+}
+
+// TimestampFromTime allows converting time.Time to Timestamp
+func TimestampFromTime(val time.Time, unit TimeUnit) (Timestamp, error) {
+ switch unit {
+ case Second:
+ return Timestamp(val.Unix()), nil
+ case Millisecond:
+ return Timestamp(val.Unix()*1e3 + int64(val.Nanosecond())/1e6), nil
+ case Microsecond:
+ return Timestamp(val.Unix()*1e6 + int64(val.Nanosecond())/1e3), nil
+ case Nanosecond:
+ return Timestamp(val.UnixNano()), nil
+ default:
+ return 0, fmt.Errorf("%w: unexpected timestamp unit: %s", ErrInvalid, unit)
+ }
+}
+
+// Time32FromString parses a string to return a Time32 value in the given unit,
+// unit needs to be only seconds or milliseconds and the string should be in the
+// form of HH:MM or HH:MM:SS[.zzz] where the fractions of a second are optional.
+func Time32FromString(val string, unit TimeUnit) (Time32, error) {
+ switch unit {
+ case Second:
+ if len(val) > 8 {
+ return 0, xerrors.New("cannot convert larger than second precision to time32s")
+ }
+ case Millisecond:
+ if len(val) > 12 {
+ return 0, xerrors.New("cannot convert larger than millisecond precision to time32ms")
+ }
+ case Microsecond, Nanosecond:
+ return 0, xerrors.New("time32 can only be seconds or milliseconds")
+ }
+
+ var (
+ out time.Time
+ err error
+ )
+ switch {
+ case len(val) == 5:
+ out, err = time.Parse("15:04", val)
+ default:
+ out, err = time.Parse("15:04:05.999", val)
+ }
+ if err != nil {
+ return 0, err
+ }
+ t := out.Sub(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))
+ if unit == Second {
+ return Time32(t.Seconds()), nil
+ }
+ return Time32(t.Milliseconds()), nil
+}
+
+func (t Time32) ToTime(unit TimeUnit) time.Time {
+ return time.Unix(0, int64(t)*int64(unit.Multiplier())).UTC()
+}
+
+func (t Time32) FormattedString(unit TimeUnit) string {
+ const baseFmt = "15:04:05"
+ tm := t.ToTime(unit)
+ switch unit {
+ case Second:
+ return tm.Format(baseFmt)
+ case Millisecond:
+ return tm.Format(baseFmt + ".000")
+ }
+ return ""
+}
+
+// Time64FromString parses a string to return a Time64 value in the given unit,
+// unit needs to be only microseconds or nanoseconds and the string should be in the
+// form of HH:MM or HH:MM:SS[.zzzzzzzzz] where the fractions of a second are optional.
+func Time64FromString(val string, unit TimeUnit) (Time64, error) {
+ // don't need to check length for nanoseconds as Parse will already error
+ // if more than 9 digits are provided for the fractional second
+ switch unit {
+ case Microsecond:
+ if len(val) > 15 {
+ return 0, xerrors.New("cannot convert larger than microsecond precision to time64us")
+ }
+ case Second, Millisecond:
+ return 0, xerrors.New("time64 should only be microseconds or nanoseconds")
+ }
+
+ var (
+ out time.Time
+ err error
+ )
+ switch {
+ case len(val) == 5:
+ out, err = time.Parse("15:04", val)
+ default:
+ out, err = time.Parse("15:04:05.999999999", val)
+ }
+ if err != nil {
+ return 0, err
+ }
+ t := out.Sub(time.Date(0, 1, 1, 0, 0, 0, 0, time.UTC))
+ if unit == Microsecond {
+ return Time64(t.Microseconds()), nil
+ }
+ return Time64(t.Nanoseconds()), nil
+}
+
+func (t Time64) ToTime(unit TimeUnit) time.Time {
+ return time.Unix(0, int64(t)*int64(unit.Multiplier())).UTC()
+}
+
+func (t Time64) FormattedString(unit TimeUnit) string {
+ const baseFmt = "15:04:05.000000"
+ tm := t.ToTime(unit)
+ switch unit {
+ case Microsecond:
+ return tm.Format(baseFmt)
+ case Nanosecond:
+ return tm.Format(baseFmt + "000")
+ }
+ return ""
+}
+
+const (
+ Second TimeUnit = iota
+ Millisecond
+ Microsecond
+ Nanosecond
+)
+
+var TimeUnitValues = []TimeUnit{Second, Millisecond, Microsecond, Nanosecond}
+
+// Multiplier returns a time.Duration value to multiply by in order to
+// convert the value into nanoseconds
+func (u TimeUnit) Multiplier() time.Duration {
+ return [...]time.Duration{time.Second, time.Millisecond, time.Microsecond, time.Nanosecond}[uint(u)&3]
+}
+
+func (u TimeUnit) String() string { return [...]string{"s", "ms", "us", "ns"}[uint(u)&3] }
+
+type TemporalWithUnit interface {
+ FixedWidthDataType
+ TimeUnit() TimeUnit
+}
+
+// TimestampType is encoded as a 64-bit signed integer since the UNIX epoch (1970-01-01T00:00:00Z).
+// The zero-value Unit is Second and the zero-value TimeZone is time zone neutral. Time zone neutral can be
+// considered UTC without having "UTC" as a time zone.
+type TimestampType struct {
+ Unit TimeUnit
+ TimeZone string
+
+ loc *time.Location // location cached by GetZone; reset with ClearCachedLocation
+}
+
+func (*TimestampType) ID() Type { return TIMESTAMP }
+func (*TimestampType) Name() string { return "timestamp" }
+func (t *TimestampType) String() string {
+ switch len(t.TimeZone) {
+ case 0:
+ return "timestamp[" + t.Unit.String() + "]"
+ default:
+ return "timestamp[" + t.Unit.String() + ", tz=" + t.TimeZone + "]"
+ }
+}
+
+func (t *TimestampType) Fingerprint() string {
+ return fmt.Sprintf("%s%d:%s", typeFingerprint(t)+string(timeUnitFingerprint(t.Unit)), len(t.TimeZone), t.TimeZone)
+}
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (*TimestampType) BitWidth() int { return 64 }
+
+func (*TimestampType) Bytes() int { return Int64SizeBytes }
+
+func (*TimestampType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(TimestampSizeBytes)}}
+}
+
+func (t *TimestampType) TimeUnit() TimeUnit { return t.Unit }
+
+// ClearCachedLocation clears the cached time.Location object in the type.
+// This should be called if you change the value of the TimeZone after having
+// potentially called GetZone.
+func (t *TimestampType) ClearCachedLocation() {
+ t.loc = nil
+}
+
+// GetZone returns a *time.Location that represents the current TimeZone member
+// of the TimestampType. If it is "", "UTC", or "utc", you'll get time.UTC.
+// Otherwise it must either be a valid tzdata string such as "America/New_York"
+// or of the format +HH:MM or -HH:MM indicating an absolute offset.
+//
+// The location object will be cached in the TimestampType for subsequent calls
+// so if you change the value of TimeZone after calling this, make sure to call
+// ClearCachedLocation.
+func (t *TimestampType) GetZone() (*time.Location, error) {
+ if t.loc != nil {
+ return t.loc, nil
+ }
+
+ // the TimeZone string is allowed to be either a valid tzdata string
+ // such as "America/New_York" or an absolute offset of the form -XX:XX
+ // or +XX:XX
+ //
+ // As such we have two methods we can try, first we'll try LoadLocation
+ // and if that fails, we'll test for an absolute offset.
+ if t.TimeZone == "" || t.TimeZone == "UTC" || t.TimeZone == "utc" {
+ t.loc = time.UTC
+ return time.UTC, nil
+ }
+
+ if loc, err := time.LoadLocation(t.TimeZone); err == nil {
+ t.loc = loc
+ return t.loc, err
+ }
+
+ // at this point we know that the timezone isn't empty, and didn't match
+ // anything in the tzdata names. So either it's an absolute offset
+ // or it's invalid.
+ timetz, err := time.Parse("-07:00", t.TimeZone)
+ if err != nil {
+ return time.UTC, fmt.Errorf("could not find timezone location for '%s'", t.TimeZone)
+ }
+
+ _, offset := timetz.Zone()
+ t.loc = time.FixedZone(t.TimeZone, offset)
+ return t.loc, nil
+}
+
+// GetToTimeFunc returns a function for converting an arrow.Timestamp value into a
+// time.Time object with proper TimeZone and precision. If the TimeZone is invalid
+// this will return an error. It calls GetZone to get the timezone for consistency.
+func (t *TimestampType) GetToTimeFunc() (func(Timestamp) time.Time, error) {
+ tz, err := t.GetZone()
+ if err != nil {
+ return nil, err
+ }
+
+ switch t.Unit {
+ case Second:
+ return func(v Timestamp) time.Time { return time.Unix(int64(v), 0).In(tz) }, nil
+ case Millisecond:
+ return func(v Timestamp) time.Time { return time.UnixMilli(int64(v)).In(tz) }, nil
+ case Microsecond:
+ return func(v Timestamp) time.Time { return time.UnixMicro(int64(v)).In(tz) }, nil
+ case Nanosecond:
+ return func(v Timestamp) time.Time { return time.Unix(0, int64(v)).In(tz) }, nil
+ }
+ return nil, fmt.Errorf("invalid timestamp unit: %s", t.Unit)
+}
+
+// Time32Type is encoded as a 32-bit signed integer, representing either seconds or milliseconds since midnight.
+type Time32Type struct {
+ Unit TimeUnit
+}
+
+func (*Time32Type) ID() Type { return TIME32 }
+func (*Time32Type) Name() string { return "time32" }
+func (*Time32Type) BitWidth() int { return 32 }
+func (*Time32Type) Bytes() int { return Int32SizeBytes }
+func (t *Time32Type) String() string { return "time32[" + t.Unit.String() + "]" }
+func (t *Time32Type) Fingerprint() string {
+ return typeFingerprint(t) + string(timeUnitFingerprint(t.Unit))
+}
+
+func (Time32Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Time32SizeBytes)}}
+}
+
+func (t *Time32Type) TimeUnit() TimeUnit { return t.Unit }
+
+// Time64Type is encoded as a 64-bit signed integer, representing either microseconds or nanoseconds since midnight.
+type Time64Type struct {
+ Unit TimeUnit
+}
+
+func (*Time64Type) ID() Type { return TIME64 }
+func (*Time64Type) Name() string { return "time64" }
+func (*Time64Type) BitWidth() int { return 64 }
+func (*Time64Type) Bytes() int { return Int64SizeBytes }
+func (t *Time64Type) String() string { return "time64[" + t.Unit.String() + "]" }
+func (t *Time64Type) Fingerprint() string {
+ return typeFingerprint(t) + string(timeUnitFingerprint(t.Unit))
+}
+
+func (Time64Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Time64SizeBytes)}}
+}
+
+func (t *Time64Type) TimeUnit() TimeUnit { return t.Unit }
+
+// DurationType is encoded as a 64-bit signed integer, representing an amount
+// of elapsed time without any relation to a calendar artifact.
+type DurationType struct {
+ Unit TimeUnit
+}
+
+func (*DurationType) ID() Type { return DURATION }
+func (*DurationType) Name() string { return "duration" }
+func (*DurationType) BitWidth() int { return 64 }
+func (*DurationType) Bytes() int { return Int64SizeBytes }
+func (t *DurationType) String() string { return "duration[" + t.Unit.String() + "]" }
+func (t *DurationType) Fingerprint() string {
+ return typeFingerprint(t) + string(timeUnitFingerprint(t.Unit))
+}
+
+func (DurationType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(DurationSizeBytes)}}
+}
+
+func (t *DurationType) TimeUnit() TimeUnit { return t.Unit }
+
+// Float16Type represents a floating point value encoded with a 16-bit precision.
+type Float16Type struct{}
+
+func (t *Float16Type) ID() Type { return FLOAT16 }
+func (t *Float16Type) Name() string { return "float16" }
+func (t *Float16Type) String() string { return "float16" }
+func (t *Float16Type) Fingerprint() string { return typeFingerprint(t) }
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *Float16Type) BitWidth() int { return 16 }
+
+func (Float16Type) Bytes() int { return Float16SizeBytes }
+
+func (Float16Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Float16SizeBytes)}}
+}
+
+type DecimalType interface {
+ DataType
+ GetPrecision() int32
+ GetScale() int32
+}
+
+func NewDecimalType(id Type, prec, scale int32) (DecimalType, error) {
+ switch id {
+ case DECIMAL128:
+ return &Decimal128Type{Precision: prec, Scale: scale}, nil
+ case DECIMAL256:
+ return &Decimal256Type{Precision: prec, Scale: scale}, nil
+ default:
+ return nil, fmt.Errorf("%w: must use DECIMAL128 or DECIMAL256 to create a DecimalType", ErrInvalid)
+ }
+}
+
+// Decimal128Type represents a fixed-size 128-bit decimal type.
+type Decimal128Type struct {
+ Precision int32
+ Scale int32
+}
+
+func (*Decimal128Type) ID() Type { return DECIMAL128 }
+func (*Decimal128Type) Name() string { return "decimal" }
+func (*Decimal128Type) BitWidth() int { return 128 }
+func (*Decimal128Type) Bytes() int { return Decimal128SizeBytes }
+func (t *Decimal128Type) String() string {
+ return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale)
+}
+func (t *Decimal128Type) Fingerprint() string {
+ return fmt.Sprintf("%s[%d,%d,%d]", typeFingerprint(t), t.BitWidth(), t.Precision, t.Scale)
+}
+func (t *Decimal128Type) GetPrecision() int32 { return t.Precision }
+func (t *Decimal128Type) GetScale() int32 { return t.Scale }
+
+func (Decimal128Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Decimal128SizeBytes)}}
+}
+
+// Decimal256Type represents a fixed-size 256-bit decimal type.
+type Decimal256Type struct {
+ Precision int32
+ Scale int32
+}
+
+func (*Decimal256Type) ID() Type { return DECIMAL256 }
+func (*Decimal256Type) Name() string { return "decimal256" }
+func (*Decimal256Type) BitWidth() int { return 256 }
+func (*Decimal256Type) Bytes() int { return Decimal256SizeBytes }
+func (t *Decimal256Type) String() string {
+ return fmt.Sprintf("%s(%d, %d)", t.Name(), t.Precision, t.Scale)
+}
+func (t *Decimal256Type) Fingerprint() string {
+ return fmt.Sprintf("%s[%d,%d,%d]", typeFingerprint(t), t.BitWidth(), t.Precision, t.Scale)
+}
+func (t *Decimal256Type) GetPrecision() int32 { return t.Precision }
+func (t *Decimal256Type) GetScale() int32 { return t.Scale }
+
+func (Decimal256Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Decimal256SizeBytes)}}
+}
+
+// MonthInterval represents a number of months.
+type MonthInterval int32
+
+func (m *MonthInterval) UnmarshalJSON(data []byte) error {
+ var val struct {
+ Months int32 `json:"months"`
+ }
+ if err := json.Unmarshal(data, &val); err != nil {
+ return err
+ }
+
+ *m = MonthInterval(val.Months)
+ return nil
+}
+
+func (m MonthInterval) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Months int32 `json:"months"`
+ }{int32(m)})
+}
+
+// MonthIntervalType is encoded as a 32-bit signed integer,
+// representing a number of months.
+type MonthIntervalType struct{}
+
+func (*MonthIntervalType) ID() Type { return INTERVAL_MONTHS }
+func (*MonthIntervalType) Name() string { return "month_interval" }
+func (*MonthIntervalType) String() string { return "month_interval" }
+func (*MonthIntervalType) Fingerprint() string { return typeIDFingerprint(INTERVAL_MONTHS) + "M" }
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *MonthIntervalType) BitWidth() int { return 32 }
+
+func (MonthIntervalType) Bytes() int { return Int32SizeBytes }
+func (MonthIntervalType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(MonthIntervalSizeBytes)}}
+}
+
+// DayTimeInterval represents a number of days and milliseconds (fraction of day).
+type DayTimeInterval struct {
+ Days int32 `json:"days"`
+ Milliseconds int32 `json:"milliseconds"`
+}
+
+// DayTimeIntervalType is encoded as a pair of 32-bit signed integer,
+// representing a number of days and milliseconds (fraction of day).
+type DayTimeIntervalType struct{}
+
+func (*DayTimeIntervalType) ID() Type { return INTERVAL_DAY_TIME }
+func (*DayTimeIntervalType) Name() string { return "day_time_interval" }
+func (*DayTimeIntervalType) String() string { return "day_time_interval" }
+func (*DayTimeIntervalType) Fingerprint() string { return typeIDFingerprint(INTERVAL_DAY_TIME) + "d" }
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (t *DayTimeIntervalType) BitWidth() int { return 64 }
+
+func (DayTimeIntervalType) Bytes() int { return DayTimeIntervalSizeBytes }
+func (DayTimeIntervalType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(DayTimeIntervalSizeBytes)}}
+}
+
+// MonthDayNanoInterval represents a number of months, days and nanoseconds (fraction of day).
+type MonthDayNanoInterval struct {
+ Months int32 `json:"months"`
+ Days int32 `json:"days"`
+ Nanoseconds int64 `json:"nanoseconds"`
+}
+
+// MonthDayNanoIntervalType is encoded as two signed 32-bit integers representing
+// a number of months and a number of days, followed by a 64-bit integer representing
+// the number of nanoseconds since midnight for fractions of a day.
+type MonthDayNanoIntervalType struct{}
+
+func (*MonthDayNanoIntervalType) ID() Type { return INTERVAL_MONTH_DAY_NANO }
+func (*MonthDayNanoIntervalType) Name() string { return "month_day_nano_interval" }
+func (*MonthDayNanoIntervalType) String() string { return "month_day_nano_interval" }
+func (*MonthDayNanoIntervalType) Fingerprint() string {
+ return typeIDFingerprint(INTERVAL_MONTH_DAY_NANO) + "N"
+}
+
+// BitWidth returns the number of bits required to store a single element of this data type in memory.
+func (*MonthDayNanoIntervalType) BitWidth() int { return 128 }
+func (*MonthDayNanoIntervalType) Bytes() int { return MonthDayNanoIntervalSizeBytes }
+func (MonthDayNanoIntervalType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(MonthDayNanoIntervalSizeBytes)}}
+}
+
+type TimestampConvertOp int8
+
+const (
+ ConvDIVIDE = iota
+ ConvMULTIPLY
+)
+
+var timestampConversion = [...][4]struct {
+ op TimestampConvertOp
+ factor int64
+}{
+ Nanosecond: {
+ Nanosecond: {ConvMULTIPLY, int64(time.Nanosecond)},
+ Microsecond: {ConvDIVIDE, int64(time.Microsecond)},
+ Millisecond: {ConvDIVIDE, int64(time.Millisecond)},
+ Second: {ConvDIVIDE, int64(time.Second)},
+ },
+ Microsecond: {
+ Nanosecond: {ConvMULTIPLY, int64(time.Microsecond)},
+ Microsecond: {ConvMULTIPLY, 1},
+ Millisecond: {ConvDIVIDE, int64(time.Millisecond / time.Microsecond)},
+ Second: {ConvDIVIDE, int64(time.Second / time.Microsecond)},
+ },
+ Millisecond: {
+ Nanosecond: {ConvMULTIPLY, int64(time.Millisecond)},
+ Microsecond: {ConvMULTIPLY, int64(time.Millisecond / time.Microsecond)},
+ Millisecond: {ConvMULTIPLY, 1},
+ Second: {ConvDIVIDE, int64(time.Second / time.Millisecond)},
+ },
+ Second: {
+ Nanosecond: {ConvMULTIPLY, int64(time.Second)},
+ Microsecond: {ConvMULTIPLY, int64(time.Second / time.Microsecond)},
+ Millisecond: {ConvMULTIPLY, int64(time.Second / time.Millisecond)},
+ Second: {ConvMULTIPLY, 1},
+ },
+}
+
+func GetTimestampConvert(in, out TimeUnit) (op TimestampConvertOp, factor int64) {
+ conv := timestampConversion[int(in)][int(out)]
+ return conv.op, conv.factor
+}
+
+func ConvertTimestampValue(in, out TimeUnit, value int64) int64 {
+ conv := timestampConversion[int(in)][int(out)]
+ switch conv.op {
+ case ConvMULTIPLY:
+ return value * conv.factor
+ case ConvDIVIDE:
+ return value / conv.factor
+ }
+
+ return 0
+}
+
+// DictionaryType represents categorical or dictionary-encoded in-memory data
+// It contains a dictionary-encoded value type (any type) and an index type
+// (any integer type).
+type DictionaryType struct {
+ IndexType DataType
+ ValueType DataType
+ Ordered bool
+}
+
+func (*DictionaryType) ID() Type { return DICTIONARY }
+func (*DictionaryType) Name() string { return "dictionary" }
+func (d *DictionaryType) BitWidth() int { return d.IndexType.(FixedWidthDataType).BitWidth() }
+func (d *DictionaryType) Bytes() int { return d.IndexType.(FixedWidthDataType).Bytes() }
+func (d *DictionaryType) String() string {
+ return fmt.Sprintf("%s<values=%s, indices=%s, ordered=%t>",
+ d.Name(), d.ValueType, d.IndexType, d.Ordered)
+}
+func (d *DictionaryType) Fingerprint() string {
+ indexFingerprint := d.IndexType.Fingerprint()
+ valueFingerprint := d.ValueType.Fingerprint()
+ ordered := "1"
+ if !d.Ordered {
+ ordered = "0"
+ }
+
+ if len(valueFingerprint) > 0 {
+ return typeFingerprint(d) + indexFingerprint + valueFingerprint + ordered
+ }
+ return ordered
+}
+
+func (d *DictionaryType) Layout() DataTypeLayout {
+ layout := d.IndexType.Layout()
+ layout.HasDict = true
+ return layout
+}
+
+var (
+ FixedWidthTypes = struct {
+ Boolean FixedWidthDataType
+ Date32 FixedWidthDataType
+ Date64 FixedWidthDataType
+ DayTimeInterval FixedWidthDataType
+ Duration_s FixedWidthDataType
+ Duration_ms FixedWidthDataType
+ Duration_us FixedWidthDataType
+ Duration_ns FixedWidthDataType
+ Float16 FixedWidthDataType
+ MonthInterval FixedWidthDataType
+ Time32s FixedWidthDataType
+ Time32ms FixedWidthDataType
+ Time64us FixedWidthDataType
+ Time64ns FixedWidthDataType
+ Timestamp_s FixedWidthDataType
+ Timestamp_ms FixedWidthDataType
+ Timestamp_us FixedWidthDataType
+ Timestamp_ns FixedWidthDataType
+ MonthDayNanoInterval FixedWidthDataType
+ }{
+ Boolean: &BooleanType{},
+ Date32: &Date32Type{},
+ Date64: &Date64Type{},
+ DayTimeInterval: &DayTimeIntervalType{},
+ Duration_s: &DurationType{Unit: Second},
+ Duration_ms: &DurationType{Unit: Millisecond},
+ Duration_us: &DurationType{Unit: Microsecond},
+ Duration_ns: &DurationType{Unit: Nanosecond},
+ Float16: &Float16Type{},
+ MonthInterval: &MonthIntervalType{},
+ Time32s: &Time32Type{Unit: Second},
+ Time32ms: &Time32Type{Unit: Millisecond},
+ Time64us: &Time64Type{Unit: Microsecond},
+ Time64ns: &Time64Type{Unit: Nanosecond},
+ Timestamp_s: &TimestampType{Unit: Second, TimeZone: "UTC"},
+ Timestamp_ms: &TimestampType{Unit: Millisecond, TimeZone: "UTC"},
+ Timestamp_us: &TimestampType{Unit: Microsecond, TimeZone: "UTC"},
+ Timestamp_ns: &TimestampType{Unit: Nanosecond, TimeZone: "UTC"},
+ MonthDayNanoInterval: &MonthDayNanoIntervalType{},
+ }
+
+ _ FixedWidthDataType = (*FixedSizeBinaryType)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go
new file mode 100644
index 000000000..4ae488033
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go
@@ -0,0 +1,977 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+type (
+ NestedType interface {
+ DataType
+
+ // Fields method provides a copy of NestedType fields
+ // (so it can be safely mutated and will not result in updating the NestedType).
+ Fields() []Field
+ }
+
+ ListLikeType interface {
+ DataType
+ Elem() DataType
+ ElemField() Field
+ }
+
+ VarLenListLikeType interface {
+ ListLikeType
+ }
+)
+
+// ListType describes a nested type in which each array slot contains
+// a variable-size sequence of values, all having the same relative type.
+type ListType struct {
+ elem Field
+}
+
+func ListOfField(f Field) *ListType {
+ if f.Type == nil {
+ panic("arrow: nil type for list field")
+ }
+ return &ListType{elem: f}
+}
+
+// ListOf returns the list type with element type t.
+// For example, if t represents int32, ListOf(t) represents []int32.
+//
+// ListOf panics if t is nil or invalid. NullableElem defaults to true
+func ListOf(t DataType) *ListType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &ListType{elem: Field{Name: "item", Type: t, Nullable: true}}
+}
+
+// ListOfNonNullable is like ListOf but NullableElem defaults to false, indicating
+// that the child type should be marked as non-nullable.
+func ListOfNonNullable(t DataType) *ListType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &ListType{elem: Field{Name: "item", Type: t, Nullable: false}}
+}
+
+func (*ListType) ID() Type { return LIST }
+func (*ListType) Name() string { return "list" }
+
+func (t *ListType) String() string {
+ if t.elem.Nullable {
+ return fmt.Sprintf("list<%s: %s, nullable>", t.elem.Name, t.elem.Type)
+ }
+ return fmt.Sprintf("list<%s: %s>", t.elem.Name, t.elem.Type)
+}
+
+func (t *ListType) Fingerprint() string {
+ child := t.elem.Type.Fingerprint()
+ if len(child) > 0 {
+ return typeFingerprint(t) + "{" + child + "}"
+ }
+ return ""
+}
+
+func (t *ListType) SetElemMetadata(md Metadata) { t.elem.Metadata = md }
+
+func (t *ListType) SetElemNullable(n bool) { t.elem.Nullable = n }
+
+// Elem returns the ListType's element type.
+func (t *ListType) Elem() DataType { return t.elem.Type }
+
+func (t *ListType) ElemField() Field {
+ return t.elem
+}
+
+func (t *ListType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (*ListType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes)}}
+}
+
+func (*ListType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+
+type LargeListType struct {
+ ListType
+}
+
+func (LargeListType) ID() Type { return LARGE_LIST }
+func (LargeListType) Name() string { return "large_list" }
+func (t *LargeListType) String() string {
+ return "large_" + t.ListType.String()
+}
+
+func (t *LargeListType) Fingerprint() string {
+ child := t.elem.Type.Fingerprint()
+ if len(child) > 0 {
+ return typeFingerprint(t) + "{" + child + "}"
+ }
+ return ""
+}
+
+func (*LargeListType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int64SizeBytes)}}
+}
+
+func (*LargeListType) OffsetTypeTraits() OffsetTraits { return Int64Traits }
+
+func LargeListOfField(f Field) *LargeListType {
+ if f.Type == nil {
+ panic("arrow: nil type for list field")
+ }
+ return &LargeListType{ListType{elem: f}}
+}
+
+// LargeListOf returns the list type with element type t.
+// For example, if t represents int32, LargeListOf(t) represents []int32.
+//
+// LargeListOf panics if t is nil or invalid. NullableElem defaults to true
+func LargeListOf(t DataType) *LargeListType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &LargeListType{ListType{elem: Field{Name: "item", Type: t, Nullable: true}}}
+}
+
+// LargeListOfNonNullable is like ListOf but NullableElem defaults to false, indicating
+// that the child type should be marked as non-nullable.
+func LargeListOfNonNullable(t DataType) *LargeListType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &LargeListType{ListType{elem: Field{Name: "item", Type: t, Nullable: false}}}
+}
+
+// FixedSizeListType describes a nested type in which each array slot contains
+// a fixed-size sequence of values, all having the same relative type.
+type FixedSizeListType struct {
+ n int32 // number of elements in the list
+ elem Field
+}
+
+func FixedSizeListOfField(n int32, f Field) *FixedSizeListType {
+ if f.Type == nil {
+ panic("arrow: nil DataType")
+ }
+ if n <= 0 {
+ panic("arrow: invalid size")
+ }
+ return &FixedSizeListType{n: n, elem: f}
+}
+
+// FixedSizeListOf returns the list type with element type t.
+// For example, if t represents int32, FixedSizeListOf(10, t) represents [10]int32.
+//
+// FixedSizeListOf panics if t is nil or invalid.
+// FixedSizeListOf panics if n is <= 0.
+// NullableElem defaults to true
+func FixedSizeListOf(n int32, t DataType) *FixedSizeListType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ if n <= 0 {
+ panic("arrow: invalid size")
+ }
+ return &FixedSizeListType{n: n, elem: Field{Name: "item", Type: t, Nullable: true}}
+}
+
+// FixedSizeListOfNonNullable is like FixedSizeListOf but NullableElem defaults to false
+// indicating that the child type should be marked as non-nullable.
+func FixedSizeListOfNonNullable(n int32, t DataType) *FixedSizeListType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ if n <= 0 {
+ panic("arrow: invalid size")
+ }
+ return &FixedSizeListType{n: n, elem: Field{Name: "item", Type: t, Nullable: false}}
+}
+
+func (*FixedSizeListType) ID() Type { return FIXED_SIZE_LIST }
+func (*FixedSizeListType) Name() string { return "fixed_size_list" }
+func (t *FixedSizeListType) String() string {
+ if t.elem.Nullable {
+ return fmt.Sprintf("fixed_size_list<%s: %s, nullable>[%d]", t.elem.Name, t.elem.Type, t.n)
+ }
+ return fmt.Sprintf("fixed_size_list<%s: %s>[%d]", t.elem.Name, t.elem.Type, t.n)
+}
+
+func (t *FixedSizeListType) SetElemNullable(n bool) { t.elem.Nullable = n }
+
+// Elem returns the FixedSizeListType's element type.
+func (t *FixedSizeListType) Elem() DataType { return t.elem.Type }
+
+// Len returns the FixedSizeListType's size.
+func (t *FixedSizeListType) Len() int32 { return t.n }
+
+func (t *FixedSizeListType) ElemField() Field {
+ return t.elem
+}
+
+func (t *FixedSizeListType) Fingerprint() string {
+ child := t.elem.Type.Fingerprint()
+ if len(child) > 0 {
+ return fmt.Sprintf("%s[%d]{%s}", typeFingerprint(t), t.n, child)
+ }
+ return ""
+}
+
+func (t *FixedSizeListType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (*FixedSizeListType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}}
+}
+
+type ListViewType struct {
+ elem Field
+}
+
+func ListViewOfField(f Field) *ListViewType {
+ if f.Type == nil {
+ panic("arrow: nil DataType")
+ }
+ return &ListViewType{elem: f}
+}
+
+// ListViewOf returns the list-view type with element type t.
+// For example, if t represents int32, ListViewOf(t) represents []int32.
+//
+// ListViewOf panics if t is nil or invalid. NullableElem defaults to true
+func ListViewOf(t DataType) *ListViewType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &ListViewType{elem: Field{Name: "item", Type: t, Nullable: true}}
+}
+
+// ListViewOfNonNullable is like ListViewOf but NullableElem defaults to false, indicating
+// that the child type should be marked as non-nullable.
+func ListViewOfNonNullable(t DataType) *ListViewType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &ListViewType{elem: Field{Name: "item", Type: t, Nullable: false}}
+}
+
+func (*ListViewType) ID() Type { return LIST_VIEW }
+func (*ListViewType) Name() string { return "list_view" }
+
+func (t *ListViewType) String() string {
+ if t.elem.Nullable {
+ return fmt.Sprintf("list_view<%s: %s, nullable>", t.elem.Name, t.elem.Type)
+ }
+ return fmt.Sprintf("list_view<%s: %s>", t.elem.Name, t.elem.Type)
+}
+
+func (t *ListViewType) Fingerprint() string {
+ child := t.elem.Type.Fingerprint()
+ if len(child) > 0 {
+ return typeFingerprint(t) + "{" + child + "}"
+ }
+ return ""
+}
+
+func (t *ListViewType) SetElemMetadata(md Metadata) { t.elem.Metadata = md }
+
+func (t *ListViewType) SetElemNullable(n bool) { t.elem.Nullable = n }
+
+// Elem returns the ListViewType's element type.
+func (t *ListViewType) Elem() DataType { return t.elem.Type }
+
+func (t *ListViewType) ElemField() Field {
+ return t.elem
+}
+
+func (t *ListViewType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (*ListViewType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes), SpecFixedWidth(Int32SizeBytes)}}
+}
+
+func (*ListViewType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+
+type LargeListViewType struct {
+ elem Field
+}
+
+func LargeListViewOfField(f Field) *LargeListViewType {
+ if f.Type == nil {
+ panic("arrow: nil DataType")
+ }
+ return &LargeListViewType{elem: f}
+}
+
+// LargeListViewOf returns the list-view type with element type t.
+// For example, if t represents int32, LargeListViewOf(t) represents []int32.
+//
+// LargeListViewOf panics if t is nil or invalid. NullableElem defaults to true
+func LargeListViewOf(t DataType) *LargeListViewType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &LargeListViewType{elem: Field{Name: "item", Type: t, Nullable: true}}
+}
+
+// LargeListViewOfNonNullable is like LargeListViewOf but NullableElem defaults
+// to false, indicating that the child type should be marked as non-nullable.
+func LargeListViewOfNonNullable(t DataType) *LargeListViewType {
+ if t == nil {
+ panic("arrow: nil DataType")
+ }
+ return &LargeListViewType{elem: Field{Name: "item", Type: t, Nullable: false}}
+}
+
+func (*LargeListViewType) ID() Type { return LARGE_LIST_VIEW }
+func (*LargeListViewType) Name() string { return "large_list_view" }
+
+func (t *LargeListViewType) String() string {
+ if t.elem.Nullable {
+ return fmt.Sprintf("large_list_view<%s: %s, nullable>", t.elem.Name, t.elem.Type)
+ }
+ return fmt.Sprintf("large_list_view<%s: %s>", t.elem.Name, t.elem.Type)
+}
+
+func (t *LargeListViewType) Fingerprint() string {
+ child := t.elem.Type.Fingerprint()
+ if len(child) > 0 {
+ return typeFingerprint(t) + "{" + child + "}"
+ }
+ return ""
+}
+
+func (t *LargeListViewType) SetElemMetadata(md Metadata) { t.elem.Metadata = md }
+
+func (t *LargeListViewType) SetElemNullable(n bool) { t.elem.Nullable = n }
+
+// Elem returns the LargeListViewType's element type.
+func (t *LargeListViewType) Elem() DataType { return t.elem.Type }
+
+func (t *LargeListViewType) ElemField() Field {
+ return t.elem
+}
+
+func (t *LargeListViewType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (*LargeListViewType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int64SizeBytes), SpecFixedWidth(Int64SizeBytes)}}
+}
+
+func (*LargeListViewType) OffsetTypeTraits() OffsetTraits { return Int64Traits }
+
+// StructType describes a nested type parameterized by an ordered sequence
+// of relative types, called its fields.
+type StructType struct {
+ fields []Field
+ index map[string][]int
+ meta Metadata
+}
+
+// StructOf returns the struct type with fields fs.
+//
+// StructOf panics if there is a field with an invalid DataType.
+func StructOf(fs ...Field) *StructType {
+ n := len(fs)
+ if n == 0 {
+ return &StructType{}
+ }
+
+ t := &StructType{
+ fields: make([]Field, n),
+ index: make(map[string][]int, n),
+ }
+ for i, f := range fs {
+ if f.Type == nil {
+ panic("arrow: field with nil DataType")
+ }
+ t.fields[i] = Field{
+ Name: f.Name,
+ Type: f.Type,
+ Nullable: f.Nullable,
+ Metadata: f.Metadata.clone(),
+ }
+ if indices, exists := t.index[f.Name]; exists {
+ t.index[f.Name] = append(indices, i)
+ } else {
+ t.index[f.Name] = []int{i}
+ }
+ }
+
+ return t
+}
+
+func (*StructType) ID() Type { return STRUCT }
+func (*StructType) Name() string { return "struct" }
+
+func (t *StructType) String() string {
+ var o strings.Builder
+ o.WriteString("struct<")
+ for i, f := range t.fields {
+ if i > 0 {
+ o.WriteString(", ")
+ }
+ o.WriteString(fmt.Sprintf("%s: %v", f.Name, f.Type))
+ }
+ o.WriteString(">")
+ return o.String()
+}
+
+// Fields method provides a copy of StructType fields
+// (so it can be safely mutated and will not result in updating the StructType).
+func (t *StructType) Fields() []Field {
+ fields := make([]Field, len(t.fields))
+ copy(fields, t.fields)
+ return fields
+}
+
+func (t *StructType) Field(i int) Field { return t.fields[i] }
+
+// FieldByName gets the field with the given name.
+//
+// If there are multiple fields with the given name, FieldByName
+// returns the first such field.
+func (t *StructType) FieldByName(name string) (Field, bool) {
+ i, ok := t.index[name]
+ if !ok {
+ return Field{}, false
+ }
+ return t.fields[i[0]], true
+}
+
+// FieldIdx gets the index of the field with the given name.
+//
+// If there are multiple fields with the given name, FieldIdx returns
+// the index of the first first such field.
+func (t *StructType) FieldIdx(name string) (int, bool) {
+ i, ok := t.index[name]
+ if ok {
+ return i[0], true
+ }
+ return -1, false
+}
+
+// FieldsByName returns all fields with the given name.
+func (t *StructType) FieldsByName(n string) ([]Field, bool) {
+ indices, ok := t.index[n]
+ if !ok {
+ return nil, ok
+ }
+ fields := make([]Field, 0, len(indices))
+ for _, v := range indices {
+ fields = append(fields, t.fields[v])
+ }
+ return fields, ok
+}
+
+// FieldIndices returns indices of all fields with the given name, or nil.
+func (t *StructType) FieldIndices(name string) []int {
+ return t.index[name]
+}
+
+func (t *StructType) Fingerprint() string {
+ var b strings.Builder
+ b.WriteString(typeFingerprint(t))
+ b.WriteByte('{')
+ for _, c := range t.fields {
+ child := c.Fingerprint()
+ if len(child) == 0 {
+ return ""
+ }
+ b.WriteString(child)
+ b.WriteByte(';')
+ }
+ b.WriteByte('}')
+ return b.String()
+}
+
+func (*StructType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}}
+}
+
+type MapType struct {
+ value *ListType
+ KeysSorted bool
+}
+
+func MapOf(key, item DataType) *MapType {
+ if key == nil || item == nil {
+ panic("arrow: nil key or item type for MapType")
+ }
+
+ return &MapType{value: ListOf(StructOf(Field{Name: "key", Type: key}, Field{Name: "value", Type: item, Nullable: true}))}
+}
+
+func MapOfWithMetadata(key DataType, keyMetadata Metadata, item DataType, itemMetadata Metadata) *MapType {
+ if key == nil || item == nil {
+ panic("arrow: nil key or item type for MapType")
+ }
+
+ return &MapType{value: ListOf(StructOf(Field{
+ Name: "key",
+ Type: key,
+ Metadata: keyMetadata,
+ }, Field{
+ Name: "value",
+ Type: item,
+ Nullable: true,
+ Metadata: itemMetadata,
+ }))}
+}
+
+func (*MapType) ID() Type { return MAP }
+func (*MapType) Name() string { return "map" }
+
+func (t *MapType) String() string {
+ var o strings.Builder
+ o.WriteString(fmt.Sprintf("map<%s, %s",
+ t.value.Elem().(*StructType).Field(0).Type,
+ t.value.Elem().(*StructType).Field(1).Type))
+ if t.KeysSorted {
+ o.WriteString(", keys_sorted")
+ }
+ if t.ItemField().Nullable {
+ o.WriteString(", items_nullable")
+ } else {
+ o.WriteString(", items_non_nullable")
+ }
+ o.WriteString(">")
+ return o.String()
+}
+
+func (t *MapType) KeyField() Field { return t.value.Elem().(*StructType).Field(0) }
+func (t *MapType) KeyType() DataType { return t.KeyField().Type }
+func (t *MapType) ItemField() Field { return t.value.Elem().(*StructType).Field(1) }
+func (t *MapType) ItemType() DataType { return t.ItemField().Type }
+
+// Deprecated: use MapType.Elem().(*StructType) instead
+func (t *MapType) ValueType() *StructType { return t.Elem().(*StructType) }
+
+// Deprecated: use MapType.ElemField() instead
+func (t *MapType) ValueField() Field { return t.ElemField() }
+
+// Elem returns the MapType's element type (if treating MapType as ListLikeType)
+func (t *MapType) Elem() DataType { return t.value.Elem() }
+
+// ElemField returns the MapType's element field (if treating MapType as ListLikeType)
+func (t *MapType) ElemField() Field { return Field{Name: "entries", Type: t.Elem()} }
+
+func (t *MapType) SetItemNullable(nullable bool) {
+ t.value.Elem().(*StructType).fields[1].Nullable = nullable
+}
+
+func (t *MapType) Fingerprint() string {
+ keyFingerprint := t.KeyType().Fingerprint()
+ itemFingerprint := t.ItemType().Fingerprint()
+ if keyFingerprint == "" || itemFingerprint == "" {
+ return ""
+ }
+
+ fingerprint := typeFingerprint(t)
+ if t.KeysSorted {
+ fingerprint += "s"
+ }
+ return fingerprint + "{" + keyFingerprint + itemFingerprint + "}"
+}
+
+func (t *MapType) Fields() []Field { return []Field{t.ElemField()} }
+
+func (t *MapType) Layout() DataTypeLayout {
+ return t.value.Layout()
+}
+
+func (*MapType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+
+type (
+ // UnionTypeCode is an alias to int8 which is the type of the ids
+ // used for union arrays.
+ UnionTypeCode = int8
+ UnionMode int8
+)
+
+const (
+ MaxUnionTypeCode UnionTypeCode = 127
+ InvalidUnionChildID int = -1
+
+ SparseMode UnionMode = iota // SPARSE
+ DenseMode // DENSE
+)
+
+// UnionType is an interface to encompass both Dense and Sparse Union types.
+//
+// A UnionType is a nested type where each logical value is taken
+// from a single child. A buffer of 8-bit type ids (typed as UnionTypeCode)
+// indicates which child a given logical value is to be taken from. This is
+// represented as the "child id" or "child index", which is the index into the
+// list of child fields for a given child.
+type UnionType interface {
+ NestedType
+ // Mode returns either SparseMode or DenseMode depending on the current
+ // concrete data type.
+ Mode() UnionMode
+ // ChildIDs returns a slice of ints to map UnionTypeCode values to
+ // the index in the Fields that represents the given Type. It is
+ // initialized with all values being InvalidUnionChildID (-1)
+ // before being populated based on the TypeCodes and fields of the type.
+ // The field for a given type can be retrieved by Fields()[ChildIDs()[typeCode]]
+ ChildIDs() []int
+ // TypeCodes returns the list of available type codes for this union type
+ // which will correspond to indexes into the ChildIDs slice to locate the
+ // appropriate child. A union Array contains a buffer of these type codes
+ // which indicate for a given index, which child has the value for that index.
+ TypeCodes() []UnionTypeCode
+ // MaxTypeCode returns the value of the largest TypeCode in the list of typecodes
+ // that are defined by this Union type
+ MaxTypeCode() UnionTypeCode
+}
+
+// UnionOf returns an appropriate union type for the given Mode (Sparse or Dense),
+// child fields, and type codes. len(fields) == len(typeCodes) must be true, or else
+// this will panic. len(fields) can be 0.
+func UnionOf(mode UnionMode, fields []Field, typeCodes []UnionTypeCode) UnionType {
+ switch mode {
+ case SparseMode:
+ return SparseUnionOf(fields, typeCodes)
+ case DenseMode:
+ return DenseUnionOf(fields, typeCodes)
+ default:
+ panic("arrow: invalid union mode")
+ }
+}
+
+type unionType struct {
+ children []Field
+ typeCodes []UnionTypeCode
+ childIDs [int(MaxUnionTypeCode) + 1]int
+}
+
+func (t *unionType) init(fields []Field, typeCodes []UnionTypeCode) {
+ // initialize all child IDs to -1
+ t.childIDs[0] = InvalidUnionChildID
+ for i := 1; i < len(t.childIDs); i *= 2 {
+ copy(t.childIDs[i:], t.childIDs[:i])
+ }
+
+ t.children = fields
+ t.typeCodes = typeCodes
+
+ for i, tc := range t.typeCodes {
+ t.childIDs[tc] = i
+ }
+}
+
+// Fields method provides a copy of union type fields
+// (so it can be safely mutated and will not result in updating the union type).
+func (t *unionType) Fields() []Field {
+ fields := make([]Field, len(t.children))
+ copy(fields, t.children)
+ return fields
+}
+
+func (t *unionType) TypeCodes() []UnionTypeCode { return t.typeCodes }
+func (t *unionType) ChildIDs() []int { return t.childIDs[:] }
+
+func (t *unionType) validate(fields []Field, typeCodes []UnionTypeCode, _ UnionMode) error {
+ if len(fields) != len(typeCodes) {
+ return errors.New("arrow: union types should have the same number of fields as type codes")
+ }
+
+ for _, c := range typeCodes {
+ if c < 0 || c > MaxUnionTypeCode {
+ return errors.New("arrow: union type code out of bounds")
+ }
+ }
+ return nil
+}
+
+func (t *unionType) MaxTypeCode() (max UnionTypeCode) {
+ if len(t.typeCodes) == 0 {
+ return
+ }
+
+ max = t.typeCodes[0]
+ for _, c := range t.typeCodes[1:] {
+ if c > max {
+ max = c
+ }
+ }
+ return
+}
+
+func (t *unionType) String() string {
+ var b strings.Builder
+ b.WriteByte('<')
+ for i := range t.typeCodes {
+ if i != 0 {
+ b.WriteString(", ")
+ }
+ fmt.Fprintf(&b, "%s=%d", t.children[i], t.typeCodes[i])
+ }
+ b.WriteByte('>')
+ return b.String()
+}
+
+func (t *unionType) fingerprint() string {
+ var b strings.Builder
+ for _, c := range t.typeCodes {
+ fmt.Fprintf(&b, ":%d", c)
+ }
+ b.WriteString("]{")
+ for _, c := range t.children {
+ fingerprint := c.Fingerprint()
+ if len(fingerprint) == 0 {
+ return ""
+ }
+ b.WriteString(fingerprint)
+ b.WriteByte(';')
+ }
+ b.WriteByte('}')
+ return b.String()
+}
+
+func fieldsFromArrays(arrays []Array, names ...string) (ret []Field) {
+ ret = make([]Field, len(arrays))
+ if len(names) == 0 {
+ for i, c := range arrays {
+ ret[i] = Field{Name: strconv.Itoa(i), Type: c.DataType(), Nullable: true}
+ }
+ } else {
+ debug.Assert(len(names) == len(arrays), "mismatch of arrays and names")
+ for i, c := range arrays {
+ ret[i] = Field{Name: names[i], Type: c.DataType(), Nullable: true}
+ }
+ }
+ return
+}
+
+// SparseUnionType is the concrete type for Sparse union data.
+//
+// A sparse union is a nested type where each logical value is taken
+// from a single child. A buffer of 8-bit type ids indicates which child
+// a given logical value is to be taken from.
+//
+// In a sparse union, each child array will have the same length as the
+// union array itself, regardless of the actual number of union values which
+// refer to it.
+//
+// Unlike most other types, unions do not have a top-level validity bitmap.
+type SparseUnionType struct {
+ unionType
+}
+
+// SparseUnionFromArrays enables creating a union type from a list of Arrays,
+// field names, and type codes. len(fields) should be either 0 or equal to len(children).
+// len(codes) should also be either 0, or equal to len(children).
+//
+// If len(fields) == 0, then the fields will be named numerically as "0", "1", "2"...
+// and so on. If len(codes) == 0, then the type codes will be constructed as
+// [0, 1, 2, ..., n].
+func SparseUnionFromArrays(children []Array, fields []string, codes []UnionTypeCode) *SparseUnionType {
+ if len(codes) == 0 {
+ codes = make([]UnionTypeCode, len(children))
+ for i := range children {
+ codes[i] = UnionTypeCode(i)
+ }
+ }
+ return SparseUnionOf(fieldsFromArrays(children, fields...), codes)
+}
+
+// SparseUnionOf is equivalent to UnionOf(arrow.SparseMode, fields, typeCodes),
+// constructing a SparseUnionType from a list of fields and type codes.
+//
+// If len(fields) != len(typeCodes) this will panic. They are allowed to be
+// of length 0.
+func SparseUnionOf(fields []Field, typeCodes []UnionTypeCode) *SparseUnionType {
+ ret := &SparseUnionType{}
+ if err := ret.validate(fields, typeCodes, ret.Mode()); err != nil {
+ panic(err)
+ }
+ ret.init(fields, typeCodes)
+ return ret
+}
+
+func (SparseUnionType) ID() Type { return SPARSE_UNION }
+func (SparseUnionType) Name() string { return "sparse_union" }
+func (SparseUnionType) Mode() UnionMode { return SparseMode }
+func (t *SparseUnionType) Fingerprint() string {
+ return typeFingerprint(t) + "[s" + t.fingerprint()
+}
+func (SparseUnionType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecFixedWidth(Uint8SizeBytes)}}
+}
+func (t *SparseUnionType) String() string {
+ return t.Name() + t.unionType.String()
+}
+
+// DenseUnionType is the concrete type for dense union data.
+//
+// A dense union is a nested type where each logical value is taken from a
+// single child, at a specific offset. A buffer of 8-bit type ids (typed
+// as UnionTypeCode) indicates which child a given logical value is to be
+// taken from and a buffer of 32-bit offsets indicating which physical position
+// in the given child array has the logical value for that index.
+//
+// Unlike a sparse union, a dense union allows encoding only the child values
+// which are actually referred to by the union array. This is counterbalanced
+// by the additional footprint of the offsets buffer, and the additional
+// indirection cost when looking up values.
+//
+// Unlike most other types, unions don't have a top-level validity bitmap
+type DenseUnionType struct {
+ unionType
+}
+
+// DenseUnionFromArrays enables creating a union type from a list of Arrays,
+// field names, and type codes. len(fields) should be either 0 or equal to len(children).
+// len(codes) should also be either 0, or equal to len(children).
+//
+// If len(fields) == 0, then the fields will be named numerically as "0", "1", "2"...
+// and so on. If len(codes) == 0, then the type codes will be constructed as
+// [0, 1, 2, ..., n].
+func DenseUnionFromArrays(children []Array, fields []string, codes []UnionTypeCode) *DenseUnionType {
+ if len(codes) == 0 {
+ codes = make([]UnionTypeCode, len(children))
+ for i := range children {
+ codes[i] = UnionTypeCode(i)
+ }
+ }
+ return DenseUnionOf(fieldsFromArrays(children, fields...), codes)
+}
+
+// DenseUnionOf is equivalent to UnionOf(arrow.DenseMode, fields, typeCodes),
+// constructing a SparseUnionType from a list of fields and type codes.
+//
+// If len(fields) != len(typeCodes) this will panic. They are allowed to be
+// of length 0.
+func DenseUnionOf(fields []Field, typeCodes []UnionTypeCode) *DenseUnionType {
+ ret := &DenseUnionType{}
+ if err := ret.validate(fields, typeCodes, ret.Mode()); err != nil {
+ panic(err)
+ }
+ ret.init(fields, typeCodes)
+ return ret
+}
+
+func (DenseUnionType) ID() Type { return DENSE_UNION }
+func (DenseUnionType) Name() string { return "dense_union" }
+func (DenseUnionType) Mode() UnionMode { return DenseMode }
+func (t *DenseUnionType) Fingerprint() string {
+ return typeFingerprint(t) + "[s" + t.fingerprint()
+}
+
+func (DenseUnionType) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{SpecFixedWidth(Uint8SizeBytes), SpecFixedWidth(Int32SizeBytes)}}
+}
+
+func (DenseUnionType) OffsetTypeTraits() OffsetTraits { return Int32Traits }
+
+func (t *DenseUnionType) String() string {
+ return t.Name() + t.unionType.String()
+}
+
+type Field struct {
+ Name string // Field name
+ Type DataType // The field's data type
+ Nullable bool // Fields can be nullable
+ Metadata Metadata // The field's metadata, if any
+}
+
+func (f Field) Fingerprint() string {
+ typeFingerprint := f.Type.Fingerprint()
+ if typeFingerprint == "" {
+ return ""
+ }
+
+ var b strings.Builder
+ b.WriteByte('F')
+ if f.Nullable {
+ b.WriteByte('n')
+ } else {
+ b.WriteByte('N')
+ }
+ b.WriteString(f.Name)
+ b.WriteByte('{')
+ b.WriteString(typeFingerprint)
+ b.WriteByte('}')
+ return b.String()
+}
+
+func (f Field) HasMetadata() bool { return f.Metadata.Len() != 0 }
+
+func (f Field) Equal(o Field) bool {
+ switch {
+ case f.Name != o.Name:
+ return false
+ case f.Nullable != o.Nullable:
+ return false
+ case !TypeEqual(f.Type, o.Type, CheckMetadata()):
+ return false
+ case !f.Metadata.Equal(o.Metadata):
+ return false
+ }
+ return true
+}
+
+func (f Field) String() string {
+ var o strings.Builder
+ nullable := ""
+ if f.Nullable {
+ nullable = ", nullable"
+ }
+ fmt.Fprintf(&o, "%s: type=%v%v", f.Name, f.Type, nullable)
+ if f.HasMetadata() {
+ fmt.Fprintf(&o, "\n%*.smetadata: %v", len(f.Name)+2, "", f.Metadata)
+ }
+ return o.String()
+}
+
+var (
+ _ DataType = (*ListType)(nil)
+ _ DataType = (*LargeListType)(nil)
+ _ DataType = (*FixedSizeListType)(nil)
+ _ DataType = (*StructType)(nil)
+ _ DataType = (*MapType)(nil)
+ _ DataType = (*DenseUnionType)(nil)
+ _ DataType = (*SparseUnionType)(nil)
+
+ _ NestedType = (*ListType)(nil)
+ _ NestedType = (*LargeListType)(nil)
+ _ NestedType = (*FixedSizeListType)(nil)
+ _ NestedType = (*MapType)(nil)
+ _ NestedType = (*DenseUnionType)(nil)
+ _ NestedType = (*SparseUnionType)(nil)
+
+ _ ListLikeType = (*ListType)(nil)
+ _ ListLikeType = (*LargeListType)(nil)
+ _ ListLikeType = (*FixedSizeListType)(nil)
+ _ ListLikeType = (*MapType)(nil)
+
+ _ VarLenListLikeType = (*ListType)(nil)
+ _ VarLenListLikeType = (*LargeListType)(nil)
+ _ VarLenListLikeType = (*ListViewType)(nil)
+ _ VarLenListLikeType = (*LargeListViewType)(nil)
+ _ VarLenListLikeType = (*FixedSizeListType)(nil)
+ _ VarLenListLikeType = (*MapType)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go
new file mode 100644
index 000000000..2d2454c65
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+// NullType describes a degenerate array, with zero physical storage.
+type NullType struct{}
+
+func (*NullType) ID() Type            { return NULL }
+func (*NullType) Name() string        { return "null" }
+func (*NullType) String() string      { return "null" }
+func (*NullType) Fingerprint() string { return typeIDFingerprint(NULL) }
+func (*NullType) Layout() DataTypeLayout {
+	// A null array has a single always-null buffer and no data buffers.
+	return DataTypeLayout{Buffers: []BufferSpec{SpecAlwaysNull()}}
+}
+
+var (
+	// Null is the canonical NullType instance; all methods use a
+	// pointer receiver that never dereferences, so the typed nil is safe.
+	Null *NullType
+	_    DataType = Null
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go
new file mode 100644
index 000000000..62cbd9001
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go
@@ -0,0 +1,206 @@
+// Code generated by datatype_numeric.gen.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+type Int8Type struct{}
+
+func (t *Int8Type) ID() Type { return INT8 }
+func (t *Int8Type) Name() string { return "int8" }
+func (t *Int8Type) String() string { return "int8" }
+func (t *Int8Type) BitWidth() int { return 8 }
+func (t *Int8Type) Bytes() int { return Int8SizeBytes }
+func (t *Int8Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Int8Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Int8SizeBytes)}}
+}
+
+type Int16Type struct{}
+
+func (t *Int16Type) ID() Type { return INT16 }
+func (t *Int16Type) Name() string { return "int16" }
+func (t *Int16Type) String() string { return "int16" }
+func (t *Int16Type) BitWidth() int { return 16 }
+func (t *Int16Type) Bytes() int { return Int16SizeBytes }
+func (t *Int16Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Int16Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Int16SizeBytes)}}
+}
+
+type Int32Type struct{}
+
+func (t *Int32Type) ID() Type { return INT32 }
+func (t *Int32Type) Name() string { return "int32" }
+func (t *Int32Type) String() string { return "int32" }
+func (t *Int32Type) BitWidth() int { return 32 }
+func (t *Int32Type) Bytes() int { return Int32SizeBytes }
+func (t *Int32Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Int32Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Int32SizeBytes)}}
+}
+
+type Int64Type struct{}
+
+func (t *Int64Type) ID() Type { return INT64 }
+func (t *Int64Type) Name() string { return "int64" }
+func (t *Int64Type) String() string { return "int64" }
+func (t *Int64Type) BitWidth() int { return 64 }
+func (t *Int64Type) Bytes() int { return Int64SizeBytes }
+func (t *Int64Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Int64Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Int64SizeBytes)}}
+}
+
+type Uint8Type struct{}
+
+func (t *Uint8Type) ID() Type { return UINT8 }
+func (t *Uint8Type) Name() string { return "uint8" }
+func (t *Uint8Type) String() string { return "uint8" }
+func (t *Uint8Type) BitWidth() int { return 8 }
+func (t *Uint8Type) Bytes() int { return Uint8SizeBytes }
+func (t *Uint8Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Uint8Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Uint8SizeBytes)}}
+}
+
+type Uint16Type struct{}
+
+func (t *Uint16Type) ID() Type { return UINT16 }
+func (t *Uint16Type) Name() string { return "uint16" }
+func (t *Uint16Type) String() string { return "uint16" }
+func (t *Uint16Type) BitWidth() int { return 16 }
+func (t *Uint16Type) Bytes() int { return Uint16SizeBytes }
+func (t *Uint16Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Uint16Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Uint16SizeBytes)}}
+}
+
+type Uint32Type struct{}
+
+func (t *Uint32Type) ID() Type { return UINT32 }
+func (t *Uint32Type) Name() string { return "uint32" }
+func (t *Uint32Type) String() string { return "uint32" }
+func (t *Uint32Type) BitWidth() int { return 32 }
+func (t *Uint32Type) Bytes() int { return Uint32SizeBytes }
+func (t *Uint32Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Uint32Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Uint32SizeBytes)}}
+}
+
+type Uint64Type struct{}
+
+func (t *Uint64Type) ID() Type { return UINT64 }
+func (t *Uint64Type) Name() string { return "uint64" }
+func (t *Uint64Type) String() string { return "uint64" }
+func (t *Uint64Type) BitWidth() int { return 64 }
+func (t *Uint64Type) Bytes() int { return Uint64SizeBytes }
+func (t *Uint64Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Uint64Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Uint64SizeBytes)}}
+}
+
+type Float32Type struct{}
+
+func (t *Float32Type) ID() Type { return FLOAT32 }
+func (t *Float32Type) Name() string { return "float32" }
+func (t *Float32Type) String() string { return "float32" }
+func (t *Float32Type) BitWidth() int { return 32 }
+func (t *Float32Type) Bytes() int { return Float32SizeBytes }
+func (t *Float32Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Float32Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Float32SizeBytes)}}
+}
+
+type Float64Type struct{}
+
+func (t *Float64Type) ID() Type { return FLOAT64 }
+func (t *Float64Type) Name() string { return "float64" }
+func (t *Float64Type) String() string { return "float64" }
+func (t *Float64Type) BitWidth() int { return 64 }
+func (t *Float64Type) Bytes() int { return Float64SizeBytes }
+func (t *Float64Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Float64Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Float64SizeBytes)}}
+}
+
+type Date32Type struct{}
+
+func (t *Date32Type) ID() Type { return DATE32 }
+func (t *Date32Type) Name() string { return "date32" }
+func (t *Date32Type) String() string { return "date32" }
+func (t *Date32Type) BitWidth() int { return 32 }
+func (t *Date32Type) Bytes() int { return Date32SizeBytes }
+func (t *Date32Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Date32Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Date32SizeBytes)}}
+}
+
+type Date64Type struct{}
+
+func (t *Date64Type) ID() Type { return DATE64 }
+func (t *Date64Type) Name() string { return "date64" }
+func (t *Date64Type) String() string { return "date64" }
+func (t *Date64Type) BitWidth() int { return 64 }
+func (t *Date64Type) Bytes() int { return Date64SizeBytes }
+func (t *Date64Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *Date64Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth(Date64SizeBytes)}}
+}
+
+var (
+ PrimitiveTypes = struct {
+ Int8 DataType
+ Int16 DataType
+ Int32 DataType
+ Int64 DataType
+ Uint8 DataType
+ Uint16 DataType
+ Uint32 DataType
+ Uint64 DataType
+ Float32 DataType
+ Float64 DataType
+ Date32 DataType
+ Date64 DataType
+ }{
+
+ Int8: &Int8Type{},
+ Int16: &Int16Type{},
+ Int32: &Int32Type{},
+ Int64: &Int64Type{},
+ Uint8: &Uint8Type{},
+ Uint16: &Uint16Type{},
+ Uint32: &Uint32Type{},
+ Uint64: &Uint64Type{},
+ Float32: &Float32Type{},
+ Float64: &Float64Type{},
+ Date32: &Date32Type{},
+ Date64: &Date64Type{},
+ }
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl
new file mode 100644
index 000000000..611046afc
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+{{range .In}}
+type {{.Name}}Type struct {}
+
+func (t *{{.Name}}Type) ID() Type { return {{.Name|upper}} }
+func (t *{{.Name}}Type) Name() string { return "{{.Name|lower}}" }
+func (t *{{.Name}}Type) String() string { return "{{.Name|lower}}" }
+func (t *{{.Name}}Type) BitWidth() int { return {{.Size}} }
+func (t *{{.Name}}Type) Bytes() int { return {{.Name}}SizeBytes }
+func (t *{{.Name}}Type) Fingerprint() string { return typeFingerprint(t) }
+func (t *{{.Name}}Type) Layout() DataTypeLayout {
+ return DataTypeLayout{Buffers: []BufferSpec{
+ SpecBitmap(), SpecFixedWidth({{.Name}}SizeBytes)}}
+}
+
+{{end}}
+
+var (
+ PrimitiveTypes = struct {
+{{range .In}}
+ {{.Name}} DataType
+{{- end}}
+ }{
+{{range .In}}
+ {{.Name}}: &{{.Name}}Type{},
+{{- end}}
+ }
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata
new file mode 100644
index 000000000..12e69fe60
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata
@@ -0,0 +1,66 @@
+[
+ {
+ "Name": "Int8",
+ "Type": "int8",
+ "Size": 8
+ },
+ {
+ "Name": "Int16",
+ "Type": "int16",
+ "Size": 16
+ },
+ {
+ "Name": "Int32",
+ "Type": "int32",
+ "Size": 32
+ },
+ {
+ "Name": "Int64",
+ "Type": "int64",
+ "Size": 64
+ },
+ {
+ "Name": "Uint8",
+ "Type": "uint8",
+ "Size": 8
+ },
+ {
+ "Name": "Uint16",
+ "Type": "uint16",
+ "Size": 16
+ },
+ {
+ "Name": "Uint32",
+ "Type": "uint32",
+ "Size": 32
+ },
+ {
+ "Name": "Uint64",
+ "Type": "uint64",
+ "Size": 64
+ },
+ {
+ "Name": "Float32",
+ "Type": "float32",
+ "Size": 32
+ },
+ {
+ "Name": "Float64",
+ "Type": "float64",
+ "Size": 64
+ },
+ {
+ "Name": "Date32",
+ "Type": "date32",
+ "QualifiedType": "arrow.Date32",
+ "InternalType": "int32",
+ "Size": 32
+ },
+ {
+ "Name": "Date64",
+ "Type": "date64",
+ "QualifiedType": "arrow.Date64",
+ "InternalType": "int64",
+ "Size": 64
+ }
+]
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go
new file mode 100644
index 000000000..898d7b427
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go
@@ -0,0 +1,611 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package decimal128
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "math/bits"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+const (
+	// MaxPrecision is the maximum number of significant decimal digits
+	// a decimal128 value can represent.
+	MaxPrecision = 38
+	// MaxScale is the maximum supported scale (digits right of the decimal point).
+	MaxScale = 38
+)
+
+var (
+	// MaxDecimal128 is the largest representable decimal128 value,
+	// i.e. 10^38 - 1, and must equal GetMaxValue(MaxPrecision).
+	// The high word is that of 10^38 (5421010862427522170, see
+	// scaleMultipliers[38]); the previous value used the high word of
+	// 10^37 (542101086242752217), yielding ~1.06*10^37 instead.
+	MaxDecimal128 = New(5421010862427522170, 687399551400673280-1)
+)
+
+// GetMaxValue returns the largest value that fits in the given
+// precision, i.e. 10^prec - 1. prec must be in [0, MaxScale] or the
+// table lookup panics.
+func GetMaxValue(prec int32) Num {
+	return scaleMultipliers[prec].Sub(FromU64(1))
+}
+
+// Num represents a signed 128-bit integer in two's complement;
+// the represented value is hi*2^64 + lo (lo taken as unsigned).
+// Calculations wrap around and overflow is ignored.
+//
+// For a discussion of the algorithms, look at Knuth's volume 2,
+// Semi-numerical Algorithms section 4.3.1.
+//
+// Adapted from the Apache ORC C++ implementation
+type Num struct {
+	lo uint64 // low bits
+	hi int64  // high bits
+}
+
+// New returns a new signed 128-bit integer value.
+func New(hi int64, lo uint64) Num {
+	return Num{lo: lo, hi: hi}
+}
+
+// FromU64 returns a new signed 128-bit integer value from the provided uint64 one.
+func FromU64(v uint64) Num {
+	return New(0, v)
+}
+
+// FromI64 returns a new signed 128-bit integer value from the provided int64 one.
+func FromI64(v int64) Num {
+	switch {
+	case v > 0:
+		return New(0, uint64(v))
+	case v < 0:
+		// sign-extend: the high word of a negative two's-complement
+		// value is all ones (-1).
+		return New(-1, uint64(v))
+	default:
+		return Num{}
+	}
+}
+
+// FromBigInt will convert a big.Int to a Num; if the value in v has a
+// BitLen > 127 (i.e. it cannot be represented as a signed 128-bit
+// value), this will panic.
+//
+// NOTE(review): the words returned by big.Int.Bits are read as 64-bit
+// values; big.Word is 32 bits on 32-bit platforms, so this looks
+// 64-bit-platform-specific — confirm against build constraints.
+func FromBigInt(v *big.Int) (n Num) {
+	bitlen := v.BitLen()
+	if bitlen > 127 {
+		panic("arrow/decimal128: cannot represent value larger than 128bits")
+	} else if bitlen == 0 {
+		// if bitlen is 0, then the value is 0 so return the default zeroed
+		// out n
+		return
+	}
+
+	// if the value is negative, then get the high and low bytes from
+	// v, and then negate it. this is because Num uses a two's compliment
+	// representation of values and big.Int stores the value as a bool for
+	// the sign and the absolute value of the integer. This means that the
+	// raw bytes are *always* the absolute value.
+	b := v.Bits()
+	n.lo = uint64(b[0])
+	if len(b) > 1 {
+		n.hi = int64(b[1])
+	}
+	if v.Sign() < 0 {
+		return n.Negate()
+	}
+	return
+}
+
+// Negate returns a copy of this Decimal128 value but with the sign negated.
+// Standard two's-complement negation: invert all bits and add one; the
+// carry out of the low word (lo wrapping to 0) propagates into hi.
+func (n Num) Negate() Num {
+	n.lo = ^n.lo + 1
+	n.hi = ^n.hi
+	if n.lo == 0 {
+		n.hi += 1
+	}
+	return n
+}
+
+// Add returns n + rhs, wrapping on overflow; the carry out of the low
+// 64 bits is propagated into the high word.
+func (n Num) Add(rhs Num) Num {
+	n.hi += rhs.hi
+	var carry uint64
+	n.lo, carry = bits.Add64(n.lo, rhs.lo, 0)
+	n.hi += int64(carry)
+	return n
+}
+
+// Sub returns n - rhs, wrapping on overflow; the borrow out of the low
+// 64 bits is propagated into the high word.
+func (n Num) Sub(rhs Num) Num {
+	n.hi -= rhs.hi
+	var borrow uint64
+	n.lo, borrow = bits.Sub64(n.lo, rhs.lo, 0)
+	n.hi -= int64(borrow)
+	return n
+}
+
+// Mul returns the low 128 bits of n * rhs (schoolbook multiply; the
+// cross terms contribute only to the high word, higher bits are lost).
+func (n Num) Mul(rhs Num) Num {
+	hi, lo := bits.Mul64(n.lo, rhs.lo)
+	hi += (uint64(n.hi) * rhs.lo) + (n.lo * uint64(rhs.hi))
+	return Num{hi: int64(hi), lo: lo}
+}
+
+// Div returns the quotient and remainder of n / rhs, computed exactly
+// via big.Int.
+func (n Num) Div(rhs Num) (res, rem Num) {
+	b := n.BigInt()
+	out, remainder := b.QuoRem(b, rhs.BigInt(), &big.Int{})
+	return FromBigInt(out), FromBigInt(remainder)
+}
+
+// Pow returns n**rhs computed via big.Int; the result must fit in
+// 128 bits or FromBigInt panics.
+func (n Num) Pow(rhs Num) Num {
+	b := n.BigInt()
+	return FromBigInt(b.Exp(b, rhs.BigInt(), nil))
+}
+
+// scalePositiveFloat64 multiplies v by 10^scale, rounds half-to-even,
+// and checks that the result fits in prec decimal digits.
+// NOTE(review): the overflow error message reports the already-scaled
+// value, not the caller's original input — confirm this is intended.
+func scalePositiveFloat64(v float64, prec, scale int32) (float64, error) {
+	var pscale float64
+	if scale >= -38 && scale <= 38 {
+		// table lookup for the common range instead of math.Pow10.
+		pscale = float64PowersOfTen[scale+38]
+	} else {
+		pscale = math.Pow10(int(scale))
+	}
+
+	v *= pscale
+	v = math.RoundToEven(v)
+	maxabs := float64PowersOfTen[prec+38]
+	if v <= -maxabs || v >= maxabs {
+		return 0, fmt.Errorf("cannot convert %f to decimal128(precision=%d, scale=%d): overflow", v, prec, scale)
+	}
+	return v, nil
+}
+
+// fromPositiveFloat64 converts a non-negative float64 into a Num by
+// scaling it and splitting the result at the 2^64 boundary into the
+// high and low words.
+func fromPositiveFloat64(v float64, prec, scale int32) (Num, error) {
+	v, err := scalePositiveFloat64(v, prec, scale)
+	if err != nil {
+		return Num{}, err
+	}
+
+	hi := math.Floor(math.Ldexp(v, -64)) // v / 2^64
+	low := v - math.Ldexp(hi, 64)        // remainder below 2^64
+	return Num{hi: int64(hi), lo: uint64(low)}, nil
+}
+
+// fromPositiveFloat32 converts a non-negative float32 into a Num.
+//
+// This has to exist despite sharing some code with fromPositiveFloat64
+// because if we don't do the casts back to float32 in between each
+// step, we end up with a significantly different answer!
+// Aren't floating point values so much fun?
+//
+// example value to use:
+//
+//	v := float32(1.8446746e+15)
+//
+// You'll end up with a different values if you do:
+//
+//	FromFloat64(float64(v), 20, 4)
+//
+// vs
+//
+//	FromFloat32(v, 20, 4)
+//
+// because float64(v) == 1844674629206016 rather than 1844674600000000
+func fromPositiveFloat32(v float32, prec, scale int32) (Num, error) {
+	val, err := scalePositiveFloat64(float64(v), prec, scale)
+	if err != nil {
+		return Num{}, err
+	}
+
+	hi := float32(math.Floor(math.Ldexp(float64(float32(val)), -64)))
+	low := float32(val) - float32(math.Ldexp(float64(hi), 64))
+	return Num{hi: int64(hi), lo: uint64(low)}, nil
+}
+
+// FromFloat32 returns a new decimal128.Num constructed from the given float32
+// value using the provided precision and scale. Will return an error if the
+// value cannot be accurately represented with the desired precision and scale.
+func FromFloat32(v float32, prec, scale int32) (Num, error) {
+	if v < 0 {
+		// convert the magnitude, then restore the sign.
+		dec, err := fromPositiveFloat32(-v, prec, scale)
+		if err != nil {
+			return dec, err
+		}
+		return dec.Negate(), nil
+	}
+	return fromPositiveFloat32(v, prec, scale)
+}
+
+// FromFloat64 returns a new decimal128.Num constructed from the given float64
+// value using the provided precision and scale. Will return an error if the
+// value cannot be accurately represented with the desired precision and scale.
+func FromFloat64(v float64, prec, scale int32) (Num, error) {
+	if v < 0 {
+		// convert the magnitude, then restore the sign.
+		dec, err := fromPositiveFloat64(-v, prec, scale)
+		if err != nil {
+			return dec, err
+		}
+		return dec.Negate(), nil
+	}
+	return fromPositiveFloat64(v, prec, scale)
+}
+
+// pt5 is one half, used to round the scaled value away from zero before
+// truncating to an integer.
+var pt5 = big.NewFloat(0.5)
+
+// FromString parses the decimal string v, scales it by 10^scale, rounds,
+// and returns the integral result as a Num. An error is returned if
+// parsing fails or the result does not fit in prec decimal digits.
+func FromString(v string, prec, scale int32) (n Num, err error) {
+	// time for some math!
+	// Our input precision means "number of digits of precision" but the
+	// math/big library refers to precision in floating point terms
+	// where it refers to the "number of bits of precision in the mantissa".
+	// So we need to figure out how many bits we should use for precision,
+	// based on the input precision. Too much precision and we're not rounding
+	// when we should. Too little precision and we round when we shouldn't.
+	//
+	// In general, the number of decimal digits you get from a given number
+	// of bits will be:
+	//
+	//	digits = log[base 10](2^nbits)
+	//
+	// it thus follows that:
+	//
+	//	digits = nbits * log[base 10](2)
+	//	nbits = digits / log[base 10](2)
+	//
+	// So we need to account for our scale since we're going to be multiplying
+	// by 10^scale in order to get the integral value we're actually going to use
+	// So to get our number of bits we do:
+	//
+	//	(prec + scale + 1) / log[base10](2)
+	//
+	// Finally, we still have a sign bit, so we -1 to account for the sign bit.
+	// Aren't floating point numbers fun?
+	var precInBits = uint(math.Round(float64(prec+scale+1)/math.Log10(2))) + 1
+
+	var out *big.Float
+	out, _, err = big.ParseFloat(v, 10, 127, big.ToNearestEven)
+	if err != nil {
+		return
+	}
+
+	// Since we're going to truncate this to get an integer, we need to round
+	// the value instead because of edge cases so that we match how other implementations
+	// (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if
+	// we're positive we'll add 0.5.
+	out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits)
+	if out.Signbit() {
+		out.Sub(out, pt5)
+	} else {
+		out.Add(out, pt5)
+	}
+
+	var tmp big.Int
+	val, _ := out.Int(&tmp)
+	if val.BitLen() > 127 {
+		return Num{}, errors.New("bitlen too large for decimal128")
+	}
+	n = FromBigInt(val)
+	if !n.FitsInPrecision(prec) {
+		err = fmt.Errorf("val %v doesn't fit in precision %d", n, prec)
+	}
+	return
+}
+
+// ToFloat32 returns a float32 value representative of this decimal128.Num,
+// but with the given scale.
+func (n Num) ToFloat32(scale int32) float32 {
+	return float32(n.ToFloat64(scale))
+}
+
+// tofloat64Positive converts n (assumed non-negative) to float64 and
+// divides by 10^scale; precision loss is expected for values needing
+// more than a float64 mantissa's worth of bits.
+func (n Num) tofloat64Positive(scale int32) float64 {
+	const twoTo64 float64 = 1.8446744073709552e+19
+	x := float64(n.hi) * twoTo64
+	x += float64(n.lo)
+	if scale >= -38 && scale <= 38 {
+		// table lookup for the common range instead of math.Pow10.
+		return x * float64PowersOfTen[-scale+38]
+	}
+
+	return x * math.Pow10(-int(scale))
+}
+
+// ToFloat64 returns a float64 value representative of this decimal128.Num,
+// but with the given scale.
+func (n Num) ToFloat64(scale int32) float64 {
+	if n.hi < 0 {
+		// operate on the absolute value, then flip the sign back.
+		return -n.Negate().tofloat64Positive(scale)
+	}
+	return n.tofloat64Positive(scale)
+}
+
+// LowBits returns the low bits of the two's complement representation of the number.
+func (n Num) LowBits() uint64 { return n.lo }
+
+// HighBits returns the high bits of the two's complement representation of the number.
+func (n Num) HighBits() int64 { return n.hi }
+
+// Sign returns:
+//
+// -1 if x < 0
+//
+// 0 if x == 0
+//
+// +1 if x > 0
+func (n Num) Sign() int {
+	if n == (Num{}) {
+		return 0
+	}
+	// n.hi>>63 is an arithmetic shift: 0 for non-negative values,
+	// -1 for negative ones; OR-ing with 1 maps those to +1 and -1.
+	return int(1 | (n.hi >> 63))
+}
+
+// toBigIntPositive reinterprets n's raw words as an unsigned 128-bit
+// magnitude; only valid when n is known to be non-negative.
+func toBigIntPositive(n Num) *big.Int {
+	return (&big.Int{}).SetBits([]big.Word{big.Word(n.lo), big.Word(n.hi)})
+}
+
+// BigInt returns the value of n as a newly allocated big.Int.
+//
+// While the code would be simpler to just do lsh/rsh and add,
+// it turns out from benchmarking that calling SetBits passing
+// in the words and negating ends up being >2x faster.
+func (n Num) BigInt() *big.Int {
+	if n.Sign() < 0 {
+		b := toBigIntPositive(n.Negate())
+		return b.Neg(b)
+	}
+	return toBigIntPositive(n)
+}
+
+// Greater returns true if the value represented by n is > other
+func (n Num) Greater(other Num) bool {
+	return other.Less(n)
+}
+
+// GreaterEqual returns true if the value represented by n is >= other
+func (n Num) GreaterEqual(other Num) bool {
+	return !n.Less(other)
+}
+
+// Less returns true if the value represented by n is < other
+func (n Num) Less(other Num) bool {
+	// compare the signed high words first, then the unsigned low words;
+	// this is a correct two's-complement lexicographic comparison.
+	return n.hi < other.hi || (n.hi == other.hi && n.lo < other.lo)
+}
+
+// LessEqual returns true if the value represented by n is <= other
+func (n Num) LessEqual(other Num) bool {
+	return !n.Greater(other)
+}
+
+// Max returns the largest Decimal128 that was passed in the arguments
+func Max(first Num, rest ...Num) Num {
+	answer := first
+	for _, number := range rest {
+		if number.Greater(answer) {
+			answer = number
+		}
+	}
+	return answer
+}
+
+// Min returns the smallest Decimal128 that was passed in the arguments
+func Min(first Num, rest ...Num) Num {
+	answer := first
+	for _, number := range rest {
+		if number.Less(answer) {
+			answer = number
+		}
+	}
+	return answer
+}
+
+// Cmp compares the numbers represented by n and other and returns:
+//
+//	+1 if n > other
+//	 0 if n == other
+//	-1 if n < other
+func (n Num) Cmp(other Num) int {
+	switch {
+	case n.Greater(other):
+		return 1
+	case n.Less(other):
+		return -1
+	}
+	return 0
+}
+
+// IncreaseScaleBy returns a new decimal128.Num with the value scaled up by
+// the desired amount. Must be 0 <= increase <= 38. Any data loss from scaling
+// is ignored. If you wish to prevent data loss, use Rescale which will
+// return an error if data loss is detected.
+func (n Num) IncreaseScaleBy(increase int32) Num {
+	debug.Assert(increase >= 0, "invalid increase scale for decimal128")
+	debug.Assert(increase <= 38, "invalid increase scale for decimal128")
+
+	// multiply by 10^increase; v is a fresh big.Int so mutating it in
+	// place via Mul is safe.
+	v := scaleMultipliers[increase].BigInt()
+	return FromBigInt(v.Mul(n.BigInt(), v))
+}
+
+// ReduceScaleBy returns a new decimal128.Num with the value scaled down by
+// the desired amount and, if 'round' is true, the value will be rounded
+// accordingly. Assumes 0 <= reduce <= 38. Any data loss from scaling
+// is ignored. If you wish to prevent data loss, use Rescale which will
+// return an error if data loss is detected.
+func (n Num) ReduceScaleBy(reduce int32, round bool) Num {
+	debug.Assert(reduce >= 0, "invalid reduce scale for decimal128")
+	debug.Assert(reduce <= 38, "invalid reduce scale for decimal128")
+
+	if reduce == 0 {
+		return n
+	}
+
+	// divide by 10^reduce; quotient truncates toward zero.
+	divisor := scaleMultipliers[reduce].BigInt()
+	result, remainder := divisor.QuoRem(n.BigInt(), divisor, (&big.Int{}))
+	if round {
+		// round half away from zero: bump the quotient by one in the
+		// direction of n's sign when |remainder| >= divisor/2.
+		divisorHalf := scaleMultipliersHalf[reduce]
+		if remainder.Abs(remainder).Cmp(divisorHalf.BigInt()) != -1 {
+			result.Add(result, big.NewInt(int64(n.Sign())))
+		}
+	}
+	return FromBigInt(result)
+}
+
+// rescaleWouldCauseDataLoss applies multiplier to n (dividing when
+// deltaScale < 0, multiplying otherwise) and reports whether the scale
+// change loses information: a non-zero remainder in the divide case,
+// or a sign/magnitude anomaly in the multiply case.
+//
+// NOTE(review): in the multiply branch the comparison uses the exact
+// big.Int product, and a product exceeding 128 bits would panic inside
+// FromBigInt before the loss flag is computed — confirm this matches
+// the intended overflow behavior of the upstream implementation.
+func (n Num) rescaleWouldCauseDataLoss(deltaScale int32, multiplier Num) (out Num, loss bool) {
+	var (
+		value, result, remainder *big.Int
+	)
+	value = n.BigInt()
+	if deltaScale < 0 {
+		debug.Assert(multiplier.lo != 0 || multiplier.hi != 0, "multiplier needs to not be zero")
+		result, remainder = (&big.Int{}).QuoRem(value, multiplier.BigInt(), (&big.Int{}))
+		return FromBigInt(result), remainder.Cmp(big.NewInt(0)) != 0
+	}
+
+	result = (&big.Int{}).Mul(value, multiplier.BigInt())
+	out = FromBigInt(result)
+	cmp := result.Cmp(value)
+	if n.Sign() < 0 {
+		loss = cmp == 1
+	} else {
+		loss = cmp == -1
+	}
+	return
+}
+
+// Rescale returns a new decimal128.Num with the value updated assuming
+// the current value is scaled to originalScale with the new value scaled
+// to newScale. If rescaling this way would cause data loss, an error is
+// returned instead.
+func (n Num) Rescale(originalScale, newScale int32) (out Num, err error) {
+	if originalScale == newScale {
+		return n, nil
+	}
+
+	// |deltaScale| selects the power-of-ten multiplier; the sign of
+	// deltaScale decides multiply vs divide inside the helper.
+	deltaScale := newScale - originalScale
+	absDeltaScale := int32(math.Abs(float64(deltaScale)))
+
+	multiplier := scaleMultipliers[absDeltaScale]
+	var wouldHaveLoss bool
+	out, wouldHaveLoss = n.rescaleWouldCauseDataLoss(deltaScale, multiplier)
+	if wouldHaveLoss {
+		err = errors.New("rescale data loss")
+	}
+	return
+}
+
+// Abs returns a new decimal128.Num that contains the absolute value of n
+func (n Num) Abs() Num {
+	switch n.Sign() {
+	case -1:
+		return n.Negate()
+	}
+	return n
+}
+
+// FitsInPrecision returns true or false if the value currently held by
+// n would fit within precision (0 < prec <= 38) without losing any data,
+// i.e. whether |n| < 10^prec.
+func (n Num) FitsInPrecision(prec int32) bool {
+	debug.Assert(prec > 0, "precision must be > 0")
+	debug.Assert(prec <= 38, "precision must be <= 38")
+	return n.Abs().Less(scaleMultipliers[prec])
+}
+
+// ToString formats n as a decimal string with 'scale' digits after the
+// decimal point. scale must be in [0, 38] since it indexes the
+// scaleMultipliers table; negative scales are not supported here.
+func (n Num) ToString(scale int32) string {
+	f := (&big.Float{}).SetInt(n.BigInt())
+	f.Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt()))
+	return f.Text('f', int(scale))
+}
+
+// GetScaleMultiplier returns 10^pow as a Num; pow must be in [0, 38].
+func GetScaleMultiplier(pow int) Num { return scaleMultipliers[pow] }
+
+// GetHalfScaleMultiplier returns 10^pow / 2 as a Num; pow must be in [0, 38].
+func GetHalfScaleMultiplier(pow int) Num { return scaleMultipliersHalf[pow] }
+
+var (
+ scaleMultipliers = [...]Num{
+ FromU64(1),
+ FromU64(10),
+ FromU64(100),
+ FromU64(1000),
+ FromU64(10000),
+ FromU64(100000),
+ FromU64(1000000),
+ FromU64(10000000),
+ FromU64(100000000),
+ FromU64(1000000000),
+ FromU64(10000000000),
+ FromU64(100000000000),
+ FromU64(1000000000000),
+ FromU64(10000000000000),
+ FromU64(100000000000000),
+ FromU64(1000000000000000),
+ FromU64(10000000000000000),
+ FromU64(100000000000000000),
+ FromU64(1000000000000000000),
+ New(0, 10000000000000000000),
+ New(5, 7766279631452241920),
+ New(54, 3875820019684212736),
+ New(542, 1864712049423024128),
+ New(5421, 200376420520689664),
+ New(54210, 2003764205206896640),
+ New(542101, 1590897978359414784),
+ New(5421010, 15908979783594147840),
+ New(54210108, 11515845246265065472),
+ New(542101086, 4477988020393345024),
+ New(5421010862, 7886392056514347008),
+ New(54210108624, 5076944270305263616),
+ New(542101086242, 13875954555633532928),
+ New(5421010862427, 9632337040368467968),
+ New(54210108624275, 4089650035136921600),
+ New(542101086242752, 4003012203950112768),
+ New(5421010862427522, 3136633892082024448),
+ New(54210108624275221, 12919594847110692864),
+ New(542101086242752217, 68739955140067328),
+ New(5421010862427522170, 687399551400673280),
+ }
+
+ scaleMultipliersHalf = [...]Num{
+ FromU64(0),
+ FromU64(5),
+ FromU64(50),
+ FromU64(500),
+ FromU64(5000),
+ FromU64(50000),
+ FromU64(500000),
+ FromU64(5000000),
+ FromU64(50000000),
+ FromU64(500000000),
+ FromU64(5000000000),
+ FromU64(50000000000),
+ FromU64(500000000000),
+ FromU64(5000000000000),
+ FromU64(50000000000000),
+ FromU64(500000000000000),
+ FromU64(5000000000000000),
+ FromU64(50000000000000000),
+ FromU64(500000000000000000),
+ FromU64(5000000000000000000),
+ New(2, 13106511852580896768),
+ New(27, 1937910009842106368),
+ New(271, 932356024711512064),
+ New(2710, 9323560247115120640),
+ New(27105, 1001882102603448320),
+ New(271050, 10018821026034483200),
+ New(2710505, 7954489891797073920),
+ New(27105054, 5757922623132532736),
+ New(271050543, 2238994010196672512),
+ New(2710505431, 3943196028257173504),
+ New(27105054312, 2538472135152631808),
+ New(271050543121, 6937977277816766464),
+ New(2710505431213, 14039540557039009792),
+ New(27105054312137, 11268197054423236608),
+ New(271050543121376, 2001506101975056384),
+ New(2710505431213761, 1568316946041012224),
+ New(27105054312137610, 15683169460410122240),
+ New(271050543121376108, 9257742014424809472),
+ New(2710505431213761085, 343699775700336640),
+ }
+
+ float64PowersOfTen = [...]float64{
+ 1e-38, 1e-37, 1e-36, 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29,
+ 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19,
+ 1e-18, 1e-17, 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9,
+ 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1,
+ 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11,
+ 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21,
+ 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29, 1e30, 1e31,
+ 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38,
+ }
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go
new file mode 100644
index 000000000..4bfcd4e04
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go
@@ -0,0 +1,693 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package decimal256
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "math/bits"
+
+ "github.com/apache/arrow/go/v14/arrow/decimal128"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+const (
+ MaxPrecision = 76
+ MaxScale = 76
+)
+
+func GetMaxValue(prec int32) Num {
+ return scaleMultipliers[prec].Sub(FromU64(1))
+}
+
+type Num struct {
+ // arr[0] is the lowest bits, arr[3] is the highest bits
+ arr [4]uint64
+}
+
+// New returns a new signed 256-bit integer value where x1 contains
+// the highest bits with the rest of the values in order down to the
+// lowest bits
+//
+// ie: New(1, 2, 3, 4) returns with the elements in little-endian order
+// {4, 3, 2, 1} but each value is still represented as the native endianness
+func New(x1, x2, x3, x4 uint64) Num {
+ return Num{[4]uint64{x4, x3, x2, x1}}
+}
+
+func (n Num) Array() [4]uint64 { return n.arr }
+
+func (n Num) LowBits() uint64 { return n.arr[0] }
+
+func FromDecimal128(n decimal128.Num) Num {
+ var topBits uint64
+ if n.Sign() < 0 {
+ topBits = math.MaxUint64
+ }
+ return New(topBits, topBits, uint64(n.HighBits()), n.LowBits())
+}
+
+func FromU64(v uint64) Num {
+ return Num{[4]uint64{v, 0, 0, 0}}
+}
+
+func FromI64(v int64) Num {
+ switch {
+ case v > 0:
+ return New(0, 0, 0, uint64(v))
+ case v < 0:
+ return New(math.MaxUint64, math.MaxUint64, math.MaxUint64, uint64(v))
+ default:
+ return Num{}
+ }
+}
+
+func (n Num) Negate() Num {
+ var carry uint64 = 1
+ for i := range n.arr {
+ n.arr[i] = ^n.arr[i] + carry
+ if n.arr[i] != 0 {
+ carry = 0
+ }
+ }
+ return n
+}
+
+func (n Num) Add(rhs Num) Num {
+ var carry uint64
+ for i, v := range n.arr {
+ n.arr[i], carry = bits.Add64(v, rhs.arr[i], carry)
+ }
+ return n
+}
+
+func (n Num) Sub(rhs Num) Num {
+ return n.Add(rhs.Negate())
+}
+
+func (n Num) Mul(rhs Num) Num {
+ b := n.BigInt()
+ return FromBigInt(b.Mul(b, rhs.BigInt()))
+}
+
+func (n Num) Div(rhs Num) (res, rem Num) {
+ b := n.BigInt()
+ out, remainder := b.QuoRem(b, rhs.BigInt(), &big.Int{})
+ return FromBigInt(out), FromBigInt(remainder)
+}
+
+func (n Num) Pow(rhs Num) Num {
+ b := n.BigInt()
+ return FromBigInt(b.Exp(b, rhs.BigInt(), nil))
+}
+
+var pt5 = big.NewFloat(0.5)
+
+func FromString(v string, prec, scale int32) (n Num, err error) {
+ // time for some math!
+ // Our input precision means "number of digits of precision" but the
+ // math/big library refers to precision in floating point terms
+ // where it refers to the "number of bits of precision in the mantissa".
+ // So we need to figure out how many bits we should use for precision,
+ // based on the input precision. Too much precision and we're not rounding
+ // when we should. Too little precision and we round when we shouldn't.
+ //
+ // In general, the number of decimal digits you get from a given number
+ // of bits will be:
+ //
+ // digits = log[base 10](2^nbits)
+ //
+ // it thus follows that:
+ //
+ // digits = nbits * log[base 10](2)
+ // nbits = digits / log[base 10](2)
+ //
+ // So we need to account for our scale since we're going to be multiplying
+ // by 10^scale in order to get the integral value we're actually going to use
+ // So to get our number of bits we do:
+ //
+ // (prec + scale + 1) / log[base10](2)
+ //
+ // Finally, we still have a sign bit, so we -1 to account for the sign bit.
+ // Aren't floating point numbers fun?
+ var precInBits = uint(math.Round(float64(prec+scale+1)/math.Log10(2))) + 1
+
+ var out *big.Float
+ out, _, err = big.ParseFloat(v, 10, 255, big.ToNearestEven)
+ if err != nil {
+ return
+ }
+
+ out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits)
+ // Since we're going to truncate this to get an integer, we need to round
+ // the value instead because of edge cases so that we match how other implementations
+ // (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if
+ // we're positive we'll add 0.5.
+ if out.Signbit() {
+ out.Sub(out, pt5)
+ } else {
+ out.Add(out, pt5)
+ }
+
+ var tmp big.Int
+ val, _ := out.Int(&tmp)
+ if val.BitLen() > 255 {
+ return Num{}, errors.New("bitlen too large for decimal256")
+ }
+ n = FromBigInt(val)
+ if !n.FitsInPrecision(prec) {
+ err = fmt.Errorf("value %v doesn't fit in precision %d", n, prec)
+ }
+ return
+}
+
+func FromFloat32(v float32, prec, scale int32) (Num, error) {
+ debug.Assert(prec > 0 && prec <= 76, "invalid precision for converting to decimal256")
+
+ if math.IsInf(float64(v), 0) {
+ return Num{}, fmt.Errorf("cannot convert %f to decimal256", v)
+ }
+
+ if v < 0 {
+ dec, err := fromPositiveFloat32(-v, prec, scale)
+ if err != nil {
+ return dec, err
+ }
+ return dec.Negate(), nil
+ }
+ return fromPositiveFloat32(v, prec, scale)
+}
+
+func FromFloat64(v float64, prec, scale int32) (Num, error) {
+ debug.Assert(prec > 0 && prec <= 76, "invalid precision for converting to decimal256")
+
+ if math.IsInf(v, 0) {
+ return Num{}, fmt.Errorf("cannot convert %f to decimal256", v)
+ }
+
+ if v < 0 {
+ dec, err := fromPositiveFloat64(-v, prec, scale)
+ if err != nil {
+ return dec, err
+ }
+ return dec.Negate(), nil
+ }
+ return fromPositiveFloat64(v, prec, scale)
+}
+
+// this has to exist despite sharing some code with fromPositiveFloat64
+// because if we don't do the casts back to float32 in between each
+// step, we end up with a significantly different answer!
+// Aren't floating point values so much fun?
+//
+// example value to use:
+//
+// v := float32(1.8446746e+15)
+//
+// You'll end up with a different values if you do:
+//
+// FromFloat64(float64(v), 20, 4)
+//
+// vs
+//
+// FromFloat32(v, 20, 4)
+//
+// because float64(v) == 1844674629206016 rather than 1844674600000000
+func fromPositiveFloat32(v float32, prec, scale int32) (Num, error) {
+ val, err := scalePositiveFloat64(float64(v), prec, scale)
+ if err != nil {
+ return Num{}, err
+ }
+
+ v = float32(val)
+ var arr [4]float32
+ arr[3] = float32(math.Floor(math.Ldexp(float64(v), -192)))
+ v -= float32(math.Ldexp(float64(arr[3]), 192))
+ arr[2] = float32(math.Floor(math.Ldexp(float64(v), -128)))
+ v -= float32(math.Ldexp(float64(arr[2]), 128))
+ arr[1] = float32(math.Floor(math.Ldexp(float64(v), -64)))
+ v -= float32(math.Ldexp(float64(arr[1]), 64))
+ arr[0] = v
+
+ debug.Assert(arr[3] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[3] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ debug.Assert(arr[2] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[2] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ debug.Assert(arr[1] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[1] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ debug.Assert(arr[0] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[0] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ return Num{[4]uint64{uint64(arr[0]), uint64(arr[1]), uint64(arr[2]), uint64(arr[3])}}, nil
+}
+
+func scalePositiveFloat64(v float64, prec, scale int32) (float64, error) {
+ var pscale float64
+ if scale >= -76 && scale <= 76 {
+ pscale = float64PowersOfTen[scale+76]
+ } else {
+ pscale = math.Pow10(int(scale))
+ }
+
+ v *= pscale
+ v = math.RoundToEven(v)
+ maxabs := float64PowersOfTen[prec+76]
+ if v <= -maxabs || v >= maxabs {
+ return 0, fmt.Errorf("cannot convert %f to decimal256(precision=%d, scale=%d): overflow",
+ v, prec, scale)
+ }
+ return v, nil
+}
+
+func fromPositiveFloat64(v float64, prec, scale int32) (Num, error) {
+ val, err := scalePositiveFloat64(v, prec, scale)
+ if err != nil {
+ return Num{}, err
+ }
+
+ var arr [4]float64
+ arr[3] = math.Floor(math.Ldexp(val, -192))
+ val -= math.Ldexp(arr[3], 192)
+ arr[2] = math.Floor(math.Ldexp(val, -128))
+ val -= math.Ldexp(arr[2], 128)
+ arr[1] = math.Floor(math.Ldexp(val, -64))
+ val -= math.Ldexp(arr[1], 64)
+ arr[0] = val
+
+ debug.Assert(arr[3] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[3] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ debug.Assert(arr[2] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[2] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ debug.Assert(arr[1] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[1] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ debug.Assert(arr[0] >= 0, "bad conversion float64 to decimal256")
+ debug.Assert(arr[0] < 1.8446744073709552e+19, "bad conversion float64 to decimal256") // 2**64
+ return Num{[4]uint64{uint64(arr[0]), uint64(arr[1]), uint64(arr[2]), uint64(arr[3])}}, nil
+}
+
+func (n Num) tofloat64Positive(scale int32) float64 {
+ const (
+ twoTo64 float64 = 1.8446744073709552e+19
+ twoTo128 float64 = 3.402823669209385e+38
+ twoTo192 float64 = 6.277101735386681e+57
+ )
+
+ x := float64(n.arr[3]) * twoTo192
+ x += float64(n.arr[2]) * twoTo128
+ x += float64(n.arr[1]) * twoTo64
+ x += float64(n.arr[0])
+
+ if scale >= -76 && scale <= 76 {
+ return x * float64PowersOfTen[-scale+76]
+ }
+
+ return x * math.Pow10(-int(scale))
+}
+
+func (n Num) ToFloat32(scale int32) float32 { return float32(n.ToFloat64(scale)) }
+
+func (n Num) ToFloat64(scale int32) float64 {
+ if n.Sign() < 0 {
+ return -n.Negate().tofloat64Positive(scale)
+ }
+ return n.tofloat64Positive(scale)
+}
+
+func (n Num) Sign() int {
+ if n == (Num{}) {
+ return 0
+ }
+ return int(1 | (int64(n.arr[3]) >> 63))
+}
+
+func FromBigInt(v *big.Int) (n Num) {
+ bitlen := v.BitLen()
+ if bitlen > 255 {
+ panic("arrow/decimal256: cannot represent value larger than 256bits")
+ } else if bitlen == 0 {
+ return
+ }
+
+ b := v.Bits()
+ for i, bits := range b {
+ n.arr[i] = uint64(bits)
+ }
+ if v.Sign() < 0 {
+ return n.Negate()
+ }
+ return
+}
+
+func toBigIntPositive(n Num) *big.Int {
+ return new(big.Int).SetBits([]big.Word{big.Word(n.arr[0]), big.Word(n.arr[1]), big.Word(n.arr[2]), big.Word(n.arr[3])})
+}
+
+func (n Num) BigInt() *big.Int {
+ if n.Sign() < 0 {
+ b := toBigIntPositive(n.Negate())
+ return b.Neg(b)
+ }
+ return toBigIntPositive(n)
+}
+
+// Greater returns true if the value represented by n is > other
+func (n Num) Greater(other Num) bool {
+ return other.Less(n)
+}
+
+// GreaterEqual returns true if the value represented by n is >= other
+func (n Num) GreaterEqual(other Num) bool {
+ return !n.Less(other)
+}
+
+// Less returns true if the value represented by n is < other
+func (n Num) Less(other Num) bool {
+ switch {
+ case n.arr[3] != other.arr[3]:
+ return int64(n.arr[3]) < int64(other.arr[3])
+ case n.arr[2] != other.arr[2]:
+ return n.arr[2] < other.arr[2]
+ case n.arr[1] != other.arr[1]:
+ return n.arr[1] < other.arr[1]
+ }
+ return n.arr[0] < other.arr[0]
+}
+
+// LessEqual returns true if the value represented by n is <= other
+func (n Num) LessEqual(other Num) bool {
+ return !n.Greater(other)
+}
+
+// Max returns the largest Decimal256 that was passed in the arguments
+func Max(first Num, rest ...Num) Num {
+ answer := first
+ for _, number := range rest {
+ if number.Greater(answer) {
+ answer = number
+ }
+ }
+ return answer
+}
+
+// Min returns the smallest Decimal256 that was passed in the arguments
+func Min(first Num, rest ...Num) Num {
+ answer := first
+ for _, number := range rest {
+ if number.Less(answer) {
+ answer = number
+ }
+ }
+ return answer
+}
+
+// Cmp compares the numbers represented by n and other and returns:
+//
+// +1 if n > other
+// 0 if n == other
+// -1 if n < other
+func (n Num) Cmp(other Num) int {
+ switch {
+ case n.Greater(other):
+ return 1
+ case n.Less(other):
+ return -1
+ }
+ return 0
+}
+
+func (n Num) IncreaseScaleBy(increase int32) Num {
+ debug.Assert(increase >= 0, "invalid amount to increase scale by")
+ debug.Assert(increase <= 76, "invalid amount to increase scale by")
+
+ v := scaleMultipliers[increase].BigInt()
+ return FromBigInt(v.Mul(n.BigInt(), v))
+}
+
+func (n Num) ReduceScaleBy(reduce int32, round bool) Num {
+ debug.Assert(reduce >= 0, "invalid amount to reduce scale by")
+ debug.Assert(reduce <= 76, "invalid amount to reduce scale by")
+
+ if reduce == 0 {
+ return n
+ }
+
+ divisor := scaleMultipliers[reduce].BigInt()
+ result, remainder := divisor.QuoRem(n.BigInt(), divisor, new(big.Int))
+ if round {
+ divisorHalf := scaleMultipliersHalf[reduce]
+ if remainder.Abs(remainder).Cmp(divisorHalf.BigInt()) != -1 {
+ result.Add(result, big.NewInt(int64(n.Sign())))
+ }
+ }
+ return FromBigInt(result)
+}
+
+func (n Num) rescaleWouldCauseDataLoss(deltaScale int32, multiplier Num) (out Num, loss bool) {
+ if deltaScale < 0 {
+ var remainder Num
+ out, remainder = n.Div(multiplier)
+ return out, remainder != Num{}
+ }
+
+ out = n.Mul(multiplier)
+ if n.Sign() < 0 {
+ loss = n.Less(out)
+ } else {
+ loss = out.Less(n)
+ }
+ return
+}
+
+func (n Num) Rescale(original, newscale int32) (out Num, err error) {
+ if original == newscale {
+ return n, nil
+ }
+
+ deltaScale := newscale - original
+ absDeltaScale := int32(math.Abs(float64(deltaScale)))
+
+ multiplier := scaleMultipliers[absDeltaScale]
+ var wouldHaveLoss bool
+ out, wouldHaveLoss = n.rescaleWouldCauseDataLoss(deltaScale, multiplier)
+ if wouldHaveLoss {
+ err = errors.New("rescale data loss")
+ }
+ return
+}
+
+func (n Num) Abs() Num {
+ switch n.Sign() {
+ case -1:
+ return n.Negate()
+ }
+ return n
+}
+
+func (n Num) FitsInPrecision(prec int32) bool {
+ debug.Assert(prec > 0, "precision must be > 0")
+ debug.Assert(prec <= 76, "precision must be <= 76")
+ return n.Abs().Less(scaleMultipliers[prec])
+}
+
+func (n Num) ToString(scale int32) string {
+ f := (&big.Float{}).SetInt(n.BigInt())
+ f.Quo(f, (&big.Float{}).SetInt(scaleMultipliers[scale].BigInt()))
+ return f.Text('f', int(scale))
+}
+
+func GetScaleMultiplier(pow int) Num { return scaleMultipliers[pow] }
+
+func GetHalfScaleMultiplier(pow int) Num { return scaleMultipliersHalf[pow] }
+
+var (
+ scaleMultipliers = [...]Num{
+ FromU64(1),
+ FromU64(10),
+ FromU64(100),
+ FromU64(1000),
+ FromU64(10000),
+ FromU64(100000),
+ FromU64(1000000),
+ FromU64(10000000),
+ FromU64(100000000),
+ FromU64(1000000000),
+ FromU64(10000000000),
+ FromU64(100000000000),
+ FromU64(1000000000000),
+ FromU64(10000000000000),
+ FromU64(100000000000000),
+ FromU64(1000000000000000),
+ FromU64(10000000000000000),
+ FromU64(100000000000000000),
+ FromU64(1000000000000000000),
+ New(0, 0, 0, 10000000000000000000),
+ New(0, 0, 5, 7766279631452241920),
+ New(0, 0, 54, 3875820019684212736),
+ New(0, 0, 542, 1864712049423024128),
+ New(0, 0, 5421, 200376420520689664),
+ New(0, 0, 54210, 2003764205206896640),
+ New(0, 0, 542101, 1590897978359414784),
+ New(0, 0, 5421010, 15908979783594147840),
+ New(0, 0, 54210108, 11515845246265065472),
+ New(0, 0, 542101086, 4477988020393345024),
+ New(0, 0, 5421010862, 7886392056514347008),
+ New(0, 0, 54210108624, 5076944270305263616),
+ New(0, 0, 542101086242, 13875954555633532928),
+ New(0, 0, 5421010862427, 9632337040368467968),
+ New(0, 0, 54210108624275, 4089650035136921600),
+ New(0, 0, 542101086242752, 4003012203950112768),
+ New(0, 0, 5421010862427522, 3136633892082024448),
+ New(0, 0, 54210108624275221, 12919594847110692864),
+ New(0, 0, 542101086242752217, 68739955140067328),
+ New(0, 0, 5421010862427522170, 687399551400673280),
+ New(0, 2, 17316620476856118468, 6873995514006732800),
+ New(0, 29, 7145508105175220139, 13399722918938673152),
+ New(0, 293, 16114848830623546549, 4870020673419870208),
+ New(0, 2938, 13574535716559052564, 11806718586779598848),
+ New(0, 29387, 6618148649623664334, 7386721425538678784),
+ New(0, 293873, 10841254275107988496, 80237960548581376),
+ New(0, 2938735, 16178822382532126880, 802379605485813760),
+ New(0, 29387358, 14214271235644855872, 8023796054858137600),
+ New(0, 293873587, 13015503840481697412, 6450984253743169536),
+ New(0, 2938735877, 1027829888850112811, 9169610316303040512),
+ New(0, 29387358770, 10278298888501128114, 17909126868192198656),
+ New(0, 293873587705, 10549268516463523069, 13070572018536022016),
+ New(0, 2938735877055, 13258964796087472617, 1578511669393358848),
+ New(0, 29387358770557, 3462439444907864858, 15785116693933588480),
+ New(0, 293873587705571, 16177650375369096972, 10277214349659471872),
+ New(0, 2938735877055718, 14202551164014556797, 10538423128046960640),
+ New(0, 29387358770557187, 12898303124178706663, 13150510911921848320),
+ New(0, 293873587705571876, 18302566799529756941, 2377900603251621888),
+ New(0, 2938735877055718769, 17004971331911604867, 5332261958806667264),
+ New(1, 10940614696847636083, 4029016655730084128, 16429131440647569408),
+ New(15, 17172426599928602752, 3396678409881738056, 16717361816799281152),
+ New(159, 5703569335900062977, 15520040025107828953, 1152921504606846976),
+ New(1593, 1695461137871974930, 7626447661401876602, 11529215046068469760),
+ New(15930, 16954611378719749304, 2477500319180559562, 4611686018427387904),
+ New(159309, 3525417123811528497, 6328259118096044006, 9223372036854775808),
+ New(1593091, 16807427164405733357, 7942358959831785217, 0),
+ New(15930919, 2053574980671369030, 5636613303479645706, 0),
+ New(159309191, 2089005733004138687, 1025900813667802212, 0),
+ New(1593091911, 2443313256331835254, 10259008136678022120, 0),
+ New(15930919111, 5986388489608800929, 10356360998232463120, 0),
+ New(159309191113, 4523652674959354447, 11329889613776873120, 0),
+ New(1593091911132, 8343038602174441244, 2618431695511421504, 0),
+ New(15930919111324, 9643409726906205977, 7737572881404663424, 0),
+ New(159309191113245, 4200376900514301694, 3588752519208427776, 0),
+ New(1593091911132452, 5110280857723913709, 17440781118374726144, 0),
+ New(15930919111324522, 14209320429820033867, 8387114520361296896, 0),
+ New(159309191113245227, 12965995782233477362, 10084168908774762496, 0),
+ New(1593091911132452277, 532749306367912313, 8607968719199866880, 0),
+ }
+
+ scaleMultipliersHalf = [...]Num{
+ FromU64(0),
+ FromU64(5),
+ FromU64(50),
+ FromU64(500),
+ FromU64(5000),
+ FromU64(50000),
+ FromU64(500000),
+ FromU64(5000000),
+ FromU64(50000000),
+ FromU64(500000000),
+ FromU64(5000000000),
+ FromU64(50000000000),
+ FromU64(500000000000),
+ FromU64(5000000000000),
+ FromU64(50000000000000),
+ FromU64(500000000000000),
+ FromU64(5000000000000000),
+ FromU64(50000000000000000),
+ FromU64(500000000000000000),
+ FromU64(5000000000000000000),
+ New(0, 0, 2, 13106511852580896768),
+ New(0, 0, 27, 1937910009842106368),
+ New(0, 0, 271, 932356024711512064),
+ New(0, 0, 2710, 9323560247115120640),
+ New(0, 0, 27105, 1001882102603448320),
+ New(0, 0, 271050, 10018821026034483200),
+ New(0, 0, 2710505, 7954489891797073920),
+ New(0, 0, 27105054, 5757922623132532736),
+ New(0, 0, 271050543, 2238994010196672512),
+ New(0, 0, 2710505431, 3943196028257173504),
+ New(0, 0, 27105054312, 2538472135152631808),
+ New(0, 0, 271050543121, 6937977277816766464),
+ New(0, 0, 2710505431213, 14039540557039009792),
+ New(0, 0, 27105054312137, 11268197054423236608),
+ New(0, 0, 271050543121376, 2001506101975056384),
+ New(0, 0, 2710505431213761, 1568316946041012224),
+ New(0, 0, 27105054312137610, 15683169460410122240),
+ New(0, 0, 271050543121376108, 9257742014424809472),
+ New(0, 0, 2710505431213761085, 343699775700336640),
+ New(0, 1, 8658310238428059234, 3436997757003366400),
+ New(0, 14, 12796126089442385877, 15923233496324112384),
+ New(0, 146, 17280796452166549082, 11658382373564710912),
+ New(0, 1469, 6787267858279526282, 5903359293389799424),
+ New(0, 14693, 12532446361666607975, 3693360712769339392),
+ New(0, 146936, 14643999174408770056, 40118980274290688),
+ New(0, 1469367, 17312783228120839248, 401189802742906880),
+ New(0, 14693679, 7107135617822427936, 4011898027429068800),
+ New(0, 146936793, 15731123957095624514, 3225492126871584768),
+ New(0, 1469367938, 9737286981279832213, 13808177195006296064),
+ New(0, 14693679385, 5139149444250564057, 8954563434096099328),
+ New(0, 146936793852, 14498006295086537342, 15758658046122786816),
+ New(0, 1469367938527, 15852854434898512116, 10012627871551455232),
+ New(0, 14693679385278, 10954591759308708237, 7892558346966794240),
+ New(0, 146936793852785, 17312197224539324294, 5138607174829735936),
+ New(0, 1469367938527859, 7101275582007278398, 14492583600878256128),
+ New(0, 14693679385278593, 15672523598944129139, 15798627492815699968),
+ New(0, 146936793852785938, 9151283399764878470, 10412322338480586752),
+ New(0, 1469367938527859384, 17725857702810578241, 11889503016258109440),
+ New(0, 14693679385278593849, 11237880364719817872, 8214565720323784704),
+ New(7, 17809585336819077184, 1698339204940869028, 8358680908399640576),
+ New(79, 12075156704804807296, 16983392049408690284, 9799832789158199296),
+ New(796, 10071102605790763273, 3813223830700938301, 5764607523034234880),
+ New(7965, 8477305689359874652, 1238750159590279781, 2305843009213693952),
+ New(79654, 10986080598760540056, 12387501595902797811, 4611686018427387904),
+ New(796545, 17627085619057642486, 13194551516770668416, 9223372036854775808),
+ New(7965459, 10250159527190460323, 2818306651739822853, 0),
+ New(79654595, 10267874903356845151, 9736322443688676914, 0),
+ New(796545955, 10445028665020693435, 5129504068339011060, 0),
+ New(7965459555, 12216566281659176272, 14401552535971007368, 0),
+ New(79654595556, 11485198374334453031, 14888316843743212368, 0),
+ New(796545955566, 4171519301087220622, 1309215847755710752, 0),
+ New(7965459555662, 4821704863453102988, 13092158477557107520, 0),
+ New(79654595556622, 11323560487111926655, 1794376259604213888, 0),
+ New(796545955566226, 2555140428861956854, 17943762596042138880, 0),
+ New(7965459555662261, 7104660214910016933, 13416929297035424256, 0),
+ New(79654595556622613, 15706369927971514489, 5042084454387381248, 0),
+ New(796545955566226138, 9489746690038731964, 13527356396454709248, 0),
+ }
+
+ float64PowersOfTen = [...]float64{
+ 1e-76, 1e-75, 1e-74, 1e-73, 1e-72, 1e-71, 1e-70, 1e-69, 1e-68, 1e-67, 1e-66, 1e-65,
+ 1e-64, 1e-63, 1e-62, 1e-61, 1e-60, 1e-59, 1e-58, 1e-57, 1e-56, 1e-55, 1e-54, 1e-53,
+ 1e-52, 1e-51, 1e-50, 1e-49, 1e-48, 1e-47, 1e-46, 1e-45, 1e-44, 1e-43, 1e-42, 1e-41,
+ 1e-40, 1e-39, 1e-38, 1e-37, 1e-36, 1e-35, 1e-34, 1e-33, 1e-32, 1e-31, 1e-30, 1e-29,
+ 1e-28, 1e-27, 1e-26, 1e-25, 1e-24, 1e-23, 1e-22, 1e-21, 1e-20, 1e-19, 1e-18, 1e-17,
+ 1e-16, 1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5,
+ 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7,
+ 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22, 1e23, 1e24, 1e25, 1e26, 1e27, 1e28, 1e29, 1e30, 1e31,
+ 1e32, 1e33, 1e34, 1e35, 1e36, 1e37, 1e38, 1e39, 1e40, 1e41, 1e42, 1e43,
+ 1e44, 1e45, 1e46, 1e47, 1e48, 1e49, 1e50, 1e51, 1e52, 1e53, 1e54, 1e55,
+ 1e56, 1e57, 1e58, 1e59, 1e60, 1e61, 1e62, 1e63, 1e64, 1e65, 1e66, 1e67,
+ 1e68, 1e69, 1e70, 1e71, 1e72, 1e73, 1e74, 1e75, 1e76,
+ }
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/doc.go
new file mode 100644
index 000000000..e923d05d6
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/doc.go
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package arrow provides an implementation of Apache Arrow.
+
+Apache Arrow is a cross-language development platform for in-memory data. It specifies a standardized
+language-independent columnar memory format for flat and hierarchical data, organized for efficient analytic
+operations on modern hardware. It also provides computational libraries and zero-copy streaming
+messaging and inter-process communication.
+
+# Basics
+
+The fundamental data structure in Arrow is an Array, which holds a sequence of values of the same type. An array
+consists of memory holding the data and an additional validity bitmap that indicates if the corresponding entry in the
+array is valid (not null). If the array has no null entries, it is possible to omit this bitmap.
+
+# Requirements
+
+Despite the go.mod stating go1.20, everything is able to be built with go1.19 or higher.
+
+To build with tinygo include the noasm build tag.
+*/
+package arrow
+
+const PkgVersion = "14.0.2"
+
+//go:generate go run _tools/tmpl/main.go -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl
+//go:generate go run _tools/tmpl/main.go -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl tensor/numeric.gen.go.tmpl tensor/numeric.gen_test.go.tmpl
+//go:generate go run _tools/tmpl/main.go -i -data=scalar/numeric.gen.go.tmpldata scalar/numeric.gen.go.tmpl scalar/numeric.gen_test.go.tmpl
+//go:generate go run ./gen-flatbuffers.go
+
+// stringer
+//go:generate stringer -type=Type
+//go:generate stringer -type=UnionMode -linecomment
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go b/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go
new file mode 100644
index 000000000..1f71e7b52
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go
@@ -0,0 +1,219 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package encoded
+
+import (
+ "math"
+ "sort"
+
+ "github.com/apache/arrow/go/v14/arrow"
+)
+
+// FindPhysicalIndex performs a binary search on the run-ends to return
+// the appropriate physical offset into the values/run-ends that corresponds
+// with the logical index provided when called. If the array's logical offset
+// is provided, this is equivalent to calling FindPhysicalOffset.
+//
+// For example, an array with run-ends [10, 20, 30, 40, 50] and a logicalIdx
+// of 25 will return the value 2. This returns the smallest offset
+// whose run-end is greater than the logicalIdx requested, which would
+// also be the index into the values that contains the correct value.
+//
+// This function assumes it receives Run End Encoded array data
+func FindPhysicalIndex(arr arrow.ArrayData, logicalIdx int) int {
+ data := arr.Children()[0]
+ if data.Len() == 0 {
+ return 0
+ }
+
+ switch data.DataType().ID() {
+ case arrow.INT16:
+ runEnds := arrow.Int16Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ runEnds = runEnds[data.Offset() : data.Offset()+data.Len()]
+ return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int16(logicalIdx) })
+ case arrow.INT32:
+ runEnds := arrow.Int32Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ runEnds = runEnds[data.Offset() : data.Offset()+data.Len()]
+ return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int32(logicalIdx) })
+ case arrow.INT64:
+ runEnds := arrow.Int64Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ runEnds = runEnds[data.Offset() : data.Offset()+data.Len()]
+ return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int64(logicalIdx) })
+ default:
+ panic("only int16, int32, and int64 are allowed for the run-ends")
+ }
+}
+
+// FindPhysicalOffset performs a binary search on the run-ends to return
+// the appropriate physical offset into the values/run-ends that corresponds
+// with the logical offset defined in the array.
+//
+// For example, an array with run-ends [10, 20, 30, 40, 50] and a logical
+// offset of 25 will return the value 2. This returns the smallest offset
+// whose run-end is greater than the logical offset, which would also be the
+// offset index into the values that contains the correct value.
+//
+// This function assumes it receives Run End Encoded array data
+func FindPhysicalOffset(arr arrow.ArrayData) int {
+ return FindPhysicalIndex(arr, arr.Offset())
+}
+
+// GetPhysicalLength returns the physical number of values which are in
+// the passed in RunEndEncoded array data. This will take into account
+// the offset and length of the array as reported in the array data
+// (so that it properly handles slices).
+//
+// This function assumes it receives Run End Encoded array data
+func GetPhysicalLength(arr arrow.ArrayData) int {
+ if arr.Len() == 0 {
+ return 0
+ }
+
+ data := arr.Children()[0]
+ physicalOffset := FindPhysicalOffset(arr)
+ start, length := data.Offset()+physicalOffset, data.Len()-physicalOffset
+ offset := arr.Offset() + arr.Len() - 1
+
+ switch data.DataType().ID() {
+ case arrow.INT16:
+ runEnds := arrow.Int16Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ runEnds = runEnds[start : start+length]
+ return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int16(offset) }) + 1
+ case arrow.INT32:
+ runEnds := arrow.Int32Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ runEnds = runEnds[start : start+length]
+ return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int32(offset) }) + 1
+ case arrow.INT64:
+ runEnds := arrow.Int64Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ runEnds = runEnds[start : start+length]
+ return sort.Search(len(runEnds), func(i int) bool { return runEnds[i] > int64(offset) }) + 1
+ default:
+ panic("arrow/rle: can only get rle.PhysicalLength for int16/int32/int64 run ends array")
+ }
+}
+
+func getRunEnds(arr arrow.ArrayData) func(int64) int64 {
+ switch arr.DataType().ID() {
+ case arrow.INT16:
+ runEnds := arrow.Int16Traits.CastFromBytes(arr.Buffers()[1].Bytes())
+ runEnds = runEnds[arr.Offset() : arr.Offset()+arr.Len()]
+ return func(i int64) int64 { return int64(runEnds[i]) }
+ case arrow.INT32:
+ runEnds := arrow.Int32Traits.CastFromBytes(arr.Buffers()[1].Bytes())
+ runEnds = runEnds[arr.Offset() : arr.Offset()+arr.Len()]
+ return func(i int64) int64 { return int64(runEnds[i]) }
+ case arrow.INT64:
+ runEnds := arrow.Int64Traits.CastFromBytes(arr.Buffers()[1].Bytes())
+ runEnds = runEnds[arr.Offset() : arr.Offset()+arr.Len()]
+ return func(i int64) int64 { return int64(runEnds[i]) }
+ default:
+ panic("only int16, int32, and int64 are allowed for the run-ends")
+ }
+}
+
+// MergedRuns is used to take two Run End Encoded arrays and iterate
+// them, finding the correct physical indices to correspond with the
+// runs.
+type MergedRuns struct {
+ inputs [2]arrow.Array
+ runIndex [2]int64
+ inputRunEnds [2]func(int64) int64
+ runEnds [2]int64
+ logicalLen int
+ logicalPos int
+ mergedEnd int64
+}
+
+// NewMergedRuns takes two RunEndEncoded arrays and returns a MergedRuns
+// object that will allow iterating over the physical indices of the runs.
+func NewMergedRuns(inputs [2]arrow.Array) *MergedRuns {
+ if len(inputs) == 0 {
+ return &MergedRuns{logicalLen: 0}
+ }
+
+ mr := &MergedRuns{inputs: inputs, logicalLen: inputs[0].Len()}
+ for i, in := range inputs {
+ if in.DataType().ID() != arrow.RUN_END_ENCODED {
+ panic("arrow/rle: NewMergedRuns can only be called with RunLengthEncoded arrays")
+ }
+ if in.Len() != mr.logicalLen {
+ panic("arrow/rle: can only merge runs of RLE arrays of the same length")
+ }
+
+ mr.inputRunEnds[i] = getRunEnds(in.Data().Children()[0])
+ // initialize the runIndex at the physical offset - 1 so the first
+ // call to Next will increment it to the correct initial offset
+ // since the initial state is logicalPos == 0 and mergedEnd == 0
+ mr.runIndex[i] = int64(FindPhysicalOffset(in.Data())) - 1
+ }
+
+ return mr
+}
+
+// Next returns true if there are more values/runs to iterate and false
+// when one of the arrays has reached the end.
+func (mr *MergedRuns) Next() bool {
+ mr.logicalPos = int(mr.mergedEnd)
+ if mr.isEnd() {
+ return false
+ }
+
+ for i := range mr.inputs {
+ if mr.logicalPos == int(mr.runEnds[i]) {
+ mr.runIndex[i]++
+ }
+ }
+ mr.findMergedRun()
+
+ return true
+}
+
+// IndexIntoBuffer returns the physical index into the value buffer of
+// the passed in array index (ie: 0 for the first array and 1 for the second)
+// this takes into account the offset of the array so it is the true physical
+// index into the value *buffer* in the child.
+func (mr *MergedRuns) IndexIntoBuffer(id int) int64 {
+ return mr.runIndex[id] + int64(mr.inputs[id].Data().Children()[1].Offset())
+}
+
+// IndexIntoArray is like IndexIntoBuffer but it doesn't take into account
+// the array offset and instead is the index that can be used with the .Value
+// method on the array to get the correct value.
+func (mr *MergedRuns) IndexIntoArray(id int) int64 { return mr.runIndex[id] }
+
+// RunLength returns the logical length of the current merged run being looked at.
+func (mr *MergedRuns) RunLength() int64 { return mr.mergedEnd - int64(mr.logicalPos) }
+
+// AccumulatedRunLength returns the logical run end of the current merged run.
+func (mr *MergedRuns) AccumulatedRunLength() int64 { return mr.mergedEnd }
+
+func (mr *MergedRuns) findMergedRun() {
+ mr.mergedEnd = int64(math.MaxInt64)
+ for i, in := range mr.inputs {
+ // logical indices of the end of the run we are currently in each input
+ mr.runEnds[i] = int64(mr.inputRunEnds[i](mr.runIndex[i]) - int64(in.Data().Offset()))
+ // the logical length may end in the middle of a run, in case the array was sliced
+ if mr.logicalLen < int(mr.runEnds[i]) {
+ mr.runEnds[i] = int64(mr.logicalLen)
+ }
+ if mr.runEnds[i] < mr.mergedEnd {
+ mr.mergedEnd = mr.runEnds[i]
+ }
+ }
+}
+
+func (mr *MergedRuns) isEnd() bool { return mr.logicalPos == mr.logicalLen }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go b/vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go
new file mode 100644
index 000000000..0b9258574
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build s390x
+// +build s390x
+
+package endian
+
+import "encoding/binary"
+
+var Native = binary.BigEndian
+
+const (
+ IsBigEndian = true
+ NativeEndian = BigEndian
+ NonNativeEndian = LittleEndian
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go b/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go
new file mode 100644
index 000000000..3ecda7b36
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go
@@ -0,0 +1,41 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package endian
+
+import (
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+)
+
+type Endianness flatbuf.Endianness
+
+const (
+ LittleEndian Endianness = Endianness(flatbuf.EndiannessLittle)
+ BigEndian Endianness = Endianness(flatbuf.EndiannessBig)
+)
+
+func (e Endianness) String() string {
+ switch e {
+ case LittleEndian:
+ return "little"
+ case BigEndian:
+ return "big"
+ default:
+ debug.Assert(false, "wtf? bad endianness value")
+ return "???"
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go b/vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go
new file mode 100644
index 000000000..def1fc64b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !s390x
+// +build !s390x
+
+package endian
+
+import "encoding/binary"
+
+var Native = binary.LittleEndian
+
+const (
+ IsBigEndian = false
+ NativeEndian = LittleEndian
+ NonNativeEndian = BigEndian
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/errors.go b/vendor/github.com/apache/arrow/go/v14/arrow/errors.go
new file mode 100644
index 000000000..72e6fd8bf
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/errors.go
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import "errors"
+
+var (
+ ErrInvalid = errors.New("invalid")
+ ErrNotImplemented = errors.New("not implemented")
+ ErrType = errors.New("type error")
+ ErrKey = errors.New("key error")
+ ErrIndex = errors.New("index error")
+ ErrNotFound = errors.New("not found")
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go b/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go
new file mode 100644
index 000000000..4e03d13df
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go
@@ -0,0 +1,165 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package float16
+
+import (
+ "math"
+ "strconv"
+)
+
+// Num represents a half-precision floating point value (float16)
+// stored on 16 bits.
+//
+// See https://en.wikipedia.org/wiki/Half-precision_floating-point_format for more informations.
+type Num struct {
+ bits uint16
+}
+
+// New creates a new half-precision floating point value from the provided
+// float32 value.
+func New(f float32) Num {
+ b := math.Float32bits(f)
+ sn := uint16((b >> 31) & 0x1)
+ exp := (b >> 23) & 0xff
+ res := int16(exp) - 127 + 15
+ fc := uint16(b>>13) & 0x3ff
+ switch {
+ case exp == 0:
+ res = 0
+ case exp == 0xff:
+ res = 0x1f
+ case res > 0x1e:
+ res = 0x1f
+ fc = 0
+ case res < 0x01:
+ res = 0
+ fc = 0
+ }
+ return Num{bits: (sn << 15) | uint16(res<<10) | fc}
+}
+
+func (f Num) Float32() float32 {
+ sn := uint32((f.bits >> 15) & 0x1)
+ exp := (f.bits >> 10) & 0x1f
+ res := uint32(exp) + 127 - 15
+ fc := uint32(f.bits & 0x3ff)
+ switch {
+ case exp == 0:
+ res = 0
+ case exp == 0x1f:
+ res = 0xff
+ }
+ return math.Float32frombits((sn << 31) | (res << 23) | (fc << 13))
+}
+
+func (n Num) Negate() Num {
+ return Num{bits: n.bits ^ 0x8000}
+}
+
+func (n Num) Add(rhs Num) Num {
+ return New(n.Float32() + rhs.Float32())
+}
+
+func (n Num) Sub(rhs Num) Num {
+ return New(n.Float32() - rhs.Float32())
+}
+
+func (n Num) Mul(rhs Num) Num {
+ return New(n.Float32() * rhs.Float32())
+}
+
+func (n Num) Div(rhs Num) Num {
+ return New(n.Float32() / rhs.Float32())
+}
+
+// Greater returns true if the value represented by n is > other
+func (n Num) Greater(other Num) bool {
+ return n.Float32() > other.Float32()
+}
+
+// GreaterEqual returns true if the value represented by n is >= other
+func (n Num) GreaterEqual(other Num) bool {
+ return n.Float32() >= other.Float32()
+}
+
+// Less returns true if the value represented by n is < other
+func (n Num) Less(other Num) bool {
+ return n.Float32() < other.Float32()
+}
+
+// LessEqual returns true if the value represented by n is <= other
+func (n Num) LessEqual(other Num) bool {
+ return n.Float32() <= other.Float32()
+}
+
+// Max returns the largest Decimal128 that was passed in the arguments
+func Max(first Num, rest ...Num) Num {
+ answer := first
+ for _, number := range rest {
+ if number.Greater(answer) {
+ answer = number
+ }
+ }
+ return answer
+}
+
+// Min returns the smallest Decimal128 that was passed in the arguments
+func Min(first Num, rest ...Num) Num {
+ answer := first
+ for _, number := range rest {
+ if number.Less(answer) {
+ answer = number
+ }
+ }
+ return answer
+}
+
+// Cmp compares the numbers represented by n and other and returns:
+//
+// +1 if n > other
+// 0 if n == other
+// -1 if n < other
+func (n Num) Cmp(other Num) int {
+ switch {
+ case n.Greater(other):
+ return 1
+ case n.Less(other):
+ return -1
+ }
+ return 0
+}
+
+func (n Num) Abs() Num {
+ switch n.Sign() {
+ case -1:
+ return n.Negate()
+ }
+ return n
+}
+
+func (n Num) Sign() int {
+ f := n.Float32()
+ if f > 0 {
+ return 1
+ } else if f == 0 {
+ return 0
+ }
+ return -1
+}
+
+func (f Num) Uint16() uint16 { return f.bits }
+func (f Num) String() string { return strconv.FormatFloat(float64(f.Float32()), 'g', -1, 32) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go
new file mode 100644
index 000000000..52b9a2331
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go
@@ -0,0 +1,24 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !assert
+
+package debug
+
+// Assert will panic with msg if cond is false.
+//
+// msg must be a string, func() string or fmt.Stringer.
+func Assert(cond bool, msg interface{}) {}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go
new file mode 100644
index 000000000..2aa5d6ace
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build assert
+
+package debug
+
+// Assert will panic with msg if cond is false.
+//
+// msg must be a string, func() string or fmt.Stringer.
+func Assert(cond bool, msg interface{}) {
+ if !cond {
+ panic(getStringValue(msg))
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go
new file mode 100644
index 000000000..3ee1783ca
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package debug provides APIs for conditional runtime assertions and debug logging.
+
+
+Using Assert
+
+To enable runtime assertions, build with the assert tag. When the assert tag is omitted,
+the code for the assertion will be omitted from the binary.
+
+
+Using Log
+
+To enable runtime debug logs, build with the debug tag. When the debug tag is omitted,
+the code for logging will be omitted from the binary.
+*/
+package debug
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go
new file mode 100644
index 000000000..48da8e1ee
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go
@@ -0,0 +1,21 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !debug
+
+package debug
+
+func Log(interface{}) {}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go
new file mode 100644
index 000000000..99d0c8ae3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go
@@ -0,0 +1,32 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build debug
+
+package debug
+
+import (
+ "log"
+ "os"
+)
+
+var (
+ debug = log.New(os.Stderr, "[D] ", log.LstdFlags)
+)
+
+func Log(msg interface{}) {
+ debug.Output(1, getStringValue(msg))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go
new file mode 100644
index 000000000..7bd3d5389
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build debug assert
+
+package debug
+
+import "fmt"
+
+func getStringValue(v interface{}) string {
+ switch a := v.(type) {
+ case func() string:
+ return a()
+
+ case string:
+ return a
+
+ case fmt.Stringer:
+ return a.String()
+
+ default:
+ panic(fmt.Sprintf("unexpected type, %t", v))
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go
new file mode 100644
index 000000000..e09a2f4a0
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go
@@ -0,0 +1,406 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dictutils
+
+import (
+ "errors"
+ "fmt"
+ "hash/maphash"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/array"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+type Kind int8
+
+const (
+ KindNew Kind = iota
+ KindDelta
+ KindReplacement
+)
+
+type FieldPos struct {
+ parent *FieldPos
+ index, depth int32
+}
+
+func NewFieldPos() FieldPos { return FieldPos{index: -1} }
+
+func (f *FieldPos) Child(index int32) FieldPos {
+ return FieldPos{parent: f, index: index, depth: f.depth + 1}
+}
+
+func (f *FieldPos) Path() []int32 {
+ path := make([]int32, f.depth)
+ cur := f
+ for i := f.depth - 1; i >= 0; i-- {
+ path[i] = int32(cur.index)
+ cur = cur.parent
+ }
+ return path
+}
+
+type Mapper struct {
+ pathToID map[uint64]int64
+ hasher maphash.Hash
+}
+
+func (d *Mapper) NumDicts() int {
+ unique := make(map[int64]bool)
+ for _, id := range d.pathToID {
+ unique[id] = true
+ }
+ return len(unique)
+}
+
+func (d *Mapper) AddField(id int64, fieldPath []int32) error {
+ d.hasher.Write(arrow.Int32Traits.CastToBytes(fieldPath))
+ defer d.hasher.Reset()
+
+ sum := d.hasher.Sum64()
+ if _, ok := d.pathToID[sum]; ok {
+ return errors.New("field already mapped to id")
+ }
+
+ d.pathToID[sum] = id
+ return nil
+}
+
+func (d *Mapper) GetFieldID(fieldPath []int32) (int64, error) {
+ d.hasher.Write(arrow.Int32Traits.CastToBytes(fieldPath))
+ defer d.hasher.Reset()
+
+ id, ok := d.pathToID[d.hasher.Sum64()]
+ if !ok {
+ return -1, errors.New("arrow/ipc: dictionary field not found")
+ }
+ return id, nil
+}
+
+func (d *Mapper) NumFields() int {
+ return len(d.pathToID)
+}
+
+func (d *Mapper) InsertPath(pos FieldPos) {
+ id := len(d.pathToID)
+ d.hasher.Write(arrow.Int32Traits.CastToBytes(pos.Path()))
+
+ d.pathToID[d.hasher.Sum64()] = int64(id)
+ d.hasher.Reset()
+}
+
+func (d *Mapper) ImportField(pos FieldPos, field *arrow.Field) {
+ dt := field.Type
+ if dt.ID() == arrow.EXTENSION {
+ dt = dt.(arrow.ExtensionType).StorageType()
+ }
+
+ if dt.ID() == arrow.DICTIONARY {
+ d.InsertPath(pos)
+ // import nested dicts
+ if nested, ok := dt.(*arrow.DictionaryType).ValueType.(arrow.NestedType); ok {
+ d.ImportFields(pos, nested.Fields())
+ }
+ return
+ }
+
+ if nested, ok := dt.(arrow.NestedType); ok {
+ d.ImportFields(pos, nested.Fields())
+ }
+}
+
+func (d *Mapper) ImportFields(pos FieldPos, fields []arrow.Field) {
+ for i := range fields {
+ d.ImportField(pos.Child(int32(i)), &fields[i])
+ }
+}
+
+func (d *Mapper) ImportSchema(schema *arrow.Schema) {
+ d.pathToID = make(map[uint64]int64)
+ d.ImportFields(NewFieldPos(), schema.Fields())
+}
+
+func hasUnresolvedNestedDict(data arrow.ArrayData) bool {
+ d := data.(*array.Data)
+ if d.DataType().ID() == arrow.DICTIONARY {
+ if d.Dictionary().(*array.Data) == nil {
+ return true
+ }
+ if hasUnresolvedNestedDict(d.Dictionary()) {
+ return true
+ }
+ }
+ for _, c := range d.Children() {
+ if hasUnresolvedNestedDict(c) {
+ return true
+ }
+ }
+ return false
+}
+
+type dictpair struct {
+ ID int64
+ Dict arrow.Array
+}
+
+type dictCollector struct {
+ dictionaries []dictpair
+ mapper *Mapper
+}
+
+func (d *dictCollector) visitChildren(pos FieldPos, typ arrow.DataType, arr arrow.Array) error {
+ for i, c := range arr.Data().Children() {
+ child := array.MakeFromData(c)
+ defer child.Release()
+ if err := d.visit(pos.Child(int32(i)), child); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *dictCollector) visit(pos FieldPos, arr arrow.Array) error {
+ dt := arr.DataType()
+ if dt.ID() == arrow.EXTENSION {
+ dt = dt.(arrow.ExtensionType).StorageType()
+ arr = arr.(array.ExtensionArray).Storage()
+ }
+
+ if dt.ID() == arrow.DICTIONARY {
+ dictarr := arr.(*array.Dictionary)
+ dict := dictarr.Dictionary()
+
+ // traverse the dictionary to first gather any nested dictionaries
+ // so they appear in the output before their respective parents
+ dictType := dt.(*arrow.DictionaryType)
+ d.visitChildren(pos, dictType.ValueType, dict)
+
+ id, err := d.mapper.GetFieldID(pos.Path())
+ if err != nil {
+ return err
+ }
+ dict.Retain()
+ d.dictionaries = append(d.dictionaries, dictpair{ID: id, Dict: dict})
+ return nil
+ }
+ return d.visitChildren(pos, dt, arr)
+}
+
+func (d *dictCollector) collect(batch arrow.Record) error {
+ var (
+ pos = NewFieldPos()
+ schema = batch.Schema()
+ )
+ d.dictionaries = make([]dictpair, 0, d.mapper.NumFields())
+ for i := range schema.Fields() {
+ if err := d.visit(pos.Child(int32(i)), batch.Column(i)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type dictMap map[int64][]arrow.ArrayData
+type dictTypeMap map[int64]arrow.DataType
+
+type Memo struct {
+ Mapper Mapper
+ dict2id map[arrow.ArrayData]int64
+
+ id2type dictTypeMap
+ id2dict dictMap // map of dictionary ID to dictionary array
+}
+
+func NewMemo() Memo {
+ return Memo{
+ dict2id: make(map[arrow.ArrayData]int64),
+ id2dict: make(dictMap),
+ id2type: make(dictTypeMap),
+ Mapper: Mapper{
+ pathToID: make(map[uint64]int64),
+ },
+ }
+}
+
+func (memo *Memo) Len() int { return len(memo.id2dict) }
+
+func (memo *Memo) Clear() {
+ for id, v := range memo.id2dict {
+ delete(memo.id2dict, id)
+ for _, d := range v {
+ delete(memo.dict2id, d)
+ d.Release()
+ }
+ }
+}
+
+func (memo *Memo) reify(id int64, mem memory.Allocator) (arrow.ArrayData, error) {
+ v, ok := memo.id2dict[id]
+ if !ok {
+ return nil, fmt.Errorf("arrow/ipc: no dictionaries found for id=%d", id)
+ }
+
+ if len(v) == 1 {
+ return v[0], nil
+ }
+
+ // there are deltas we need to concatenate them with the first dictionary
+ toCombine := make([]arrow.Array, 0, len(v))
+ // NOTE: at this point the dictionary data may not be trusted. it needs to
+ // be validated as concatenation can crash on invalid or corrupted data.
+ for _, data := range v {
+ if hasUnresolvedNestedDict(data) {
+ return nil, fmt.Errorf("arrow/ipc: delta dict with unresolved nested dictionary not implemented")
+ }
+ arr := array.MakeFromData(data)
+ defer arr.Release()
+
+ toCombine = append(toCombine, arr)
+ defer data.Release()
+ }
+
+ combined, err := array.Concatenate(toCombine, mem)
+ if err != nil {
+ return nil, err
+ }
+ defer combined.Release()
+ combined.Data().Retain()
+
+ memo.id2dict[id] = []arrow.ArrayData{combined.Data()}
+ return combined.Data(), nil
+}
+
+func (memo *Memo) Dict(id int64, mem memory.Allocator) (arrow.ArrayData, error) {
+ return memo.reify(id, mem)
+}
+
+func (memo *Memo) AddType(id int64, typ arrow.DataType) error {
+ if existing, dup := memo.id2type[id]; dup && !arrow.TypeEqual(existing, typ) {
+ return fmt.Errorf("arrow/ipc: conflicting dictionary types for id %d", id)
+ }
+
+ memo.id2type[id] = typ
+ return nil
+}
+
+func (memo *Memo) Type(id int64) (arrow.DataType, bool) {
+ t, ok := memo.id2type[id]
+ return t, ok
+}
+
+// func (memo *dictMemo) ID(v arrow.Array) int64 {
+// id, ok := memo.dict2id[v]
+// if ok {
+// return id
+// }
+
+// v.Retain()
+// id = int64(len(memo.dict2id))
+// memo.dict2id[v] = id
+// memo.id2dict[id] = v
+// return id
+// }
+
+func (memo Memo) HasDict(v arrow.ArrayData) bool {
+ _, ok := memo.dict2id[v]
+ return ok
+}
+
+func (memo Memo) HasID(id int64) bool {
+ _, ok := memo.id2dict[id]
+ return ok
+}
+
+func (memo *Memo) Add(id int64, v arrow.ArrayData) {
+ if _, dup := memo.id2dict[id]; dup {
+ panic(fmt.Errorf("arrow/ipc: duplicate id=%d", id))
+ }
+ v.Retain()
+ memo.id2dict[id] = []arrow.ArrayData{v}
+ memo.dict2id[v] = id
+}
+
+func (memo *Memo) AddDelta(id int64, v arrow.ArrayData) {
+ d, ok := memo.id2dict[id]
+ if !ok {
+ panic(fmt.Errorf("arrow/ipc: adding delta to non-existing id=%d", id))
+ }
+ v.Retain()
+ memo.id2dict[id] = append(d, v)
+}
+
+// AddOrReplace puts the provided dictionary into the memo table. If it
+// already exists, then the new data will replace it. Otherwise it is added
+// to the memo table.
+func (memo *Memo) AddOrReplace(id int64, v arrow.ArrayData) bool {
+ d, ok := memo.id2dict[id]
+ if ok {
+ // replace the dictionary and release any existing ones
+ for _, dict := range d {
+ dict.Release()
+ }
+ d[0] = v
+ d = d[:1]
+ } else {
+ d = []arrow.ArrayData{v}
+ }
+ v.Retain()
+ memo.id2dict[id] = d
+ return !ok
+}
+
+func CollectDictionaries(batch arrow.Record, mapper *Mapper) (out []dictpair, err error) {
+ collector := dictCollector{mapper: mapper}
+ err = collector.collect(batch)
+ out = collector.dictionaries
+ return
+}
+
+func ResolveFieldDict(memo *Memo, data arrow.ArrayData, pos FieldPos, mem memory.Allocator) error {
+ typ := data.DataType()
+ if typ.ID() == arrow.EXTENSION {
+ typ = typ.(arrow.ExtensionType).StorageType()
+ }
+ if typ.ID() == arrow.DICTIONARY {
+ id, err := memo.Mapper.GetFieldID(pos.Path())
+ if err != nil {
+ return err
+ }
+ dictData, err := memo.Dict(id, mem)
+ if err != nil {
+ return err
+ }
+ data.(*array.Data).SetDictionary(dictData)
+ if err := ResolveFieldDict(memo, dictData, pos, mem); err != nil {
+ return err
+ }
+ }
+ return ResolveDictionaries(memo, data.Children(), pos, mem)
+}
+
+func ResolveDictionaries(memo *Memo, cols []arrow.ArrayData, parentPos FieldPos, mem memory.Allocator) error {
+ for i, c := range cols {
+ if c == nil {
+ continue
+ }
+ if err := ResolveFieldDict(memo, c, parentPos.Child(int32(i)), mem); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go
new file mode 100644
index 000000000..e8018e74c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Opaque binary data
+type Binary struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsBinary(buf []byte, offset flatbuffers.UOffsetT) *Binary {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Binary{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Binary) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Binary) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func BinaryStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func BinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go
new file mode 100644
index 000000000..09ca5e7db
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Logically the same as Binary, but the internal representation uses a view
+/// struct that contains the string length and either the string's entire data
+/// inline (for small strings) or an inlined prefix, an index of another buffer,
+/// and an offset pointing to a slice in that buffer (for non-small strings).
+///
+/// Since it uses a variable number of data buffers, each Field with this type
+/// must have a corresponding entry in `variadicBufferCounts`.
+type BinaryView struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsBinaryView(buf []byte, offset flatbuffers.UOffsetT) *BinaryView {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &BinaryView{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *BinaryView) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *BinaryView) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func BinaryViewStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func BinaryViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go
new file mode 100644
index 000000000..57a697b19
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go
@@ -0,0 +1,74 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Block struct {
+ _tab flatbuffers.Struct
+}
+
+func (rcv *Block) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Block) Table() flatbuffers.Table {
+ return rcv._tab.Table
+}
+
+/// Index to the start of the RecordBlock (note this is past the Message header)
+func (rcv *Block) Offset() int64 {
+ return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0))
+}
+/// Index to the start of the RecordBlock (note this is past the Message header)
+func (rcv *Block) MutateOffset(n int64) bool {
+ return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n)
+}
+
+/// Length of the metadata
+func (rcv *Block) MetaDataLength() int32 {
+ return rcv._tab.GetInt32(rcv._tab.Pos + flatbuffers.UOffsetT(8))
+}
+/// Length of the metadata
+func (rcv *Block) MutateMetaDataLength(n int32) bool {
+ return rcv._tab.MutateInt32(rcv._tab.Pos+flatbuffers.UOffsetT(8), n)
+}
+
+/// Length of the data (this is aligned so there can be a gap between this and
+/// the metadata).
+func (rcv *Block) BodyLength() int64 {
+ return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(16))
+}
+/// Length of the data (this is aligned so there can be a gap between this and
+/// the metadata).
+func (rcv *Block) MutateBodyLength(n int64) bool {
+ return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(16), n)
+}
+
+func CreateBlock(builder *flatbuffers.Builder, offset int64, metaDataLength int32, bodyLength int64) flatbuffers.UOffsetT {
+ builder.Prep(8, 24)
+ builder.PrependInt64(bodyLength)
+ builder.Pad(4)
+ builder.PrependInt32(metaDataLength)
+ builder.PrependInt64(offset)
+ return builder.Offset()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go
new file mode 100644
index 000000000..6468e2313
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go
@@ -0,0 +1,89 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Optional compression for the memory buffers constituting IPC message
+/// bodies. Intended for use with RecordBatch but could be used for other
+/// message types
+type BodyCompression struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsBodyCompression(buf []byte, offset flatbuffers.UOffsetT) *BodyCompression {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &BodyCompression{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *BodyCompression) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *BodyCompression) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Compressor library.
+/// For LZ4_FRAME, each compressed buffer must consist of a single frame.
+func (rcv *BodyCompression) Codec() CompressionType {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return CompressionType(rcv._tab.GetInt8(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+/// Compressor library.
+/// For LZ4_FRAME, each compressed buffer must consist of a single frame.
+func (rcv *BodyCompression) MutateCodec(n CompressionType) bool {
+ return rcv._tab.MutateInt8Slot(4, int8(n))
+}
+
+/// Indicates the way the record batch body was compressed
+func (rcv *BodyCompression) Method() BodyCompressionMethod {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return BodyCompressionMethod(rcv._tab.GetInt8(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+/// Indicates the way the record batch body was compressed
+func (rcv *BodyCompression) MutateMethod(n BodyCompressionMethod) bool {
+ return rcv._tab.MutateInt8Slot(6, int8(n))
+}
+
+func BodyCompressionStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func BodyCompressionAddCodec(builder *flatbuffers.Builder, codec CompressionType) {
+ builder.PrependInt8Slot(0, int8(codec), 0)
+}
+func BodyCompressionAddMethod(builder *flatbuffers.Builder, method BodyCompressionMethod) {
+ builder.PrependInt8Slot(1, int8(method), 0)
+}
+func BodyCompressionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go
new file mode 100644
index 000000000..108ab3e07
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+/// Provided for forward compatibility in case we need to support different
+/// strategies for compressing the IPC message body (like whole-body
+/// compression rather than buffer-level) in the future
+type BodyCompressionMethod int8
+
+const (
+ /// Each constituent buffer is first compressed with the indicated
+ /// compressor, and then written with the uncompressed length in the first 8
+ /// bytes as a 64-bit little-endian signed integer followed by the compressed
+ /// buffer bytes (and then padding as required by the protocol). The
+ /// uncompressed length may be set to -1 to indicate that the data that
+ /// follows is not compressed, which can be useful for cases where
+ /// compression does not yield appreciable savings.
+ BodyCompressionMethodBUFFER BodyCompressionMethod = 0
+)
+
+var EnumNamesBodyCompressionMethod = map[BodyCompressionMethod]string{
+ BodyCompressionMethodBUFFER: "BUFFER",
+}
+
+var EnumValuesBodyCompressionMethod = map[string]BodyCompressionMethod{
+ "BUFFER": BodyCompressionMethodBUFFER,
+}
+
+func (v BodyCompressionMethod) String() string {
+ if s, ok := EnumNamesBodyCompressionMethod[v]; ok {
+ return s
+ }
+ return "BodyCompressionMethod(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go
new file mode 100644
index 000000000..6a4a9d268
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Bool struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsBool(buf []byte, offset flatbuffers.UOffsetT) *Bool {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Bool{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Bool) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Bool) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func BoolStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func BoolEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go
new file mode 100644
index 000000000..eba8d99b2
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go
@@ -0,0 +1,73 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// A Buffer represents a single contiguous memory segment
+type Buffer struct {
+ _tab flatbuffers.Struct
+}
+
+func (rcv *Buffer) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Buffer) Table() flatbuffers.Table {
+ return rcv._tab.Table
+}
+
+/// The relative offset into the shared memory page where the bytes for this
+/// buffer starts
+func (rcv *Buffer) Offset() int64 {
+ return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0))
+}
+/// The relative offset into the shared memory page where the bytes for this
+/// buffer starts
+func (rcv *Buffer) MutateOffset(n int64) bool {
+ return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n)
+}
+
+/// The absolute length (in bytes) of the memory buffer. The memory is found
+/// from offset (inclusive) to offset + length (non-inclusive). When building
+/// messages using the encapsulated IPC message, padding bytes may be written
+/// after a buffer, but such padding bytes do not need to be accounted for in
+/// the size here.
+func (rcv *Buffer) Length() int64 {
+ return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8))
+}
+/// The absolute length (in bytes) of the memory buffer. The memory is found
+/// from offset (inclusive) to offset + length (non-inclusive). When building
+/// messages using the encapsulated IPC message, padding bytes may be written
+/// after a buffer, but such padding bytes do not need to be accounted for in
+/// the size here.
+func (rcv *Buffer) MutateLength(n int64) bool {
+ return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n)
+}
+
+func CreateBuffer(builder *flatbuffers.Builder, offset int64, length int64) flatbuffers.UOffsetT {
+ builder.Prep(8, 16)
+ builder.PrependInt64(length)
+ builder.PrependInt64(offset)
+ return builder.Offset()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go
new file mode 100644
index 000000000..96e9df072
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type CompressionType int8
+
+const (
+ CompressionTypeLZ4_FRAME CompressionType = 0
+ CompressionTypeZSTD CompressionType = 1
+)
+
+var EnumNamesCompressionType = map[CompressionType]string{
+ CompressionTypeLZ4_FRAME: "LZ4_FRAME",
+ CompressionTypeZSTD: "ZSTD",
+}
+
+var EnumValuesCompressionType = map[string]CompressionType{
+ "LZ4_FRAME": CompressionTypeLZ4_FRAME,
+ "ZSTD": CompressionTypeZSTD,
+}
+
+func (v CompressionType) String() string {
+ if s, ok := EnumNamesCompressionType[v]; ok {
+ return s
+ }
+ return "CompressionType(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go
new file mode 100644
index 000000000..32983ec54
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Date is either a 32-bit or 64-bit signed integer type representing an
+/// elapsed time since UNIX epoch (1970-01-01), stored in either of two units:
+///
+/// * Milliseconds (64 bits) indicating UNIX time elapsed since the epoch (no
+/// leap seconds), where the values are evenly divisible by 86400000
+/// * Days (32 bits) since the UNIX epoch
+type Date struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDate(buf []byte, offset flatbuffers.UOffsetT) *Date {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Date{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Date) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Date) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Date) Unit() DateUnit {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return DateUnit(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 1
+}
+
+func (rcv *Date) MutateUnit(n DateUnit) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func DateStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func DateAddUnit(builder *flatbuffers.Builder, unit DateUnit) {
+ builder.PrependInt16Slot(0, int16(unit), 1)
+}
+func DateEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go
new file mode 100644
index 000000000..8a12eec17
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type DateUnit int16
+
+const (
+ DateUnitDAY DateUnit = 0
+ DateUnitMILLISECOND DateUnit = 1
+)
+
+var EnumNamesDateUnit = map[DateUnit]string{
+ DateUnitDAY: "DAY",
+ DateUnitMILLISECOND: "MILLISECOND",
+}
+
+var EnumValuesDateUnit = map[string]DateUnit{
+ "DAY": DateUnitDAY,
+ "MILLISECOND": DateUnitMILLISECOND,
+}
+
+func (v DateUnit) String() string {
+ if s, ok := EnumNamesDateUnit[v]; ok {
+ return s
+ }
+ return "DateUnit(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go
new file mode 100644
index 000000000..c9de254d1
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Exact decimal value represented as an integer value in two's
+/// complement. Currently only 128-bit (16-byte) and 256-bit (32-byte) integers
+/// are used. The representation uses the endianness indicated
+/// in the Schema.
+type Decimal struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDecimal(buf []byte, offset flatbuffers.UOffsetT) *Decimal {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Decimal{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Decimal) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Decimal) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Total number of decimal digits
+func (rcv *Decimal) Precision() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// Total number of decimal digits
+func (rcv *Decimal) MutatePrecision(n int32) bool {
+ return rcv._tab.MutateInt32Slot(4, n)
+}
+
+/// Number of digits after the decimal point "."
+func (rcv *Decimal) Scale() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// Number of digits after the decimal point "."
+func (rcv *Decimal) MutateScale(n int32) bool {
+ return rcv._tab.MutateInt32Slot(6, n)
+}
+
+/// Number of bits per value. The only accepted widths are 128 and 256.
+/// We use bitWidth for consistency with Int::bitWidth.
+func (rcv *Decimal) BitWidth() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 128
+}
+
+/// Number of bits per value. The only accepted widths are 128 and 256.
+/// We use bitWidth for consistency with Int::bitWidth.
+func (rcv *Decimal) MutateBitWidth(n int32) bool {
+ return rcv._tab.MutateInt32Slot(8, n)
+}
+
+func DecimalStart(builder *flatbuffers.Builder) {
+ builder.StartObject(3)
+}
+func DecimalAddPrecision(builder *flatbuffers.Builder, precision int32) {
+ builder.PrependInt32Slot(0, precision, 0)
+}
+func DecimalAddScale(builder *flatbuffers.Builder, scale int32) {
+ builder.PrependInt32Slot(1, scale, 0)
+}
+func DecimalAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) {
+ builder.PrependInt32Slot(2, bitWidth, 128)
+}
+func DecimalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go
new file mode 100644
index 000000000..25b5384e4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// For sending dictionary encoding information. Any Field can be
+/// dictionary-encoded, but in this case none of its children may be
+/// dictionary-encoded.
+/// There is one vector / column per dictionary, but that vector / column
+/// may be spread across multiple dictionary batches by using the isDelta
+/// flag
+type DictionaryBatch struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDictionaryBatch(buf []byte, offset flatbuffers.UOffsetT) *DictionaryBatch {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &DictionaryBatch{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *DictionaryBatch) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *DictionaryBatch) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *DictionaryBatch) Id() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *DictionaryBatch) MutateId(n int64) bool {
+ return rcv._tab.MutateInt64Slot(4, n)
+}
+
+func (rcv *DictionaryBatch) Data(obj *RecordBatch) *RecordBatch {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(RecordBatch)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// If isDelta is true the values in the dictionary are to be appended to a
+/// dictionary with the indicated id. If isDelta is false this dictionary
+/// should replace the existing dictionary.
+func (rcv *DictionaryBatch) IsDelta() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+/// If isDelta is true the values in the dictionary are to be appended to a
+/// dictionary with the indicated id. If isDelta is false this dictionary
+/// should replace the existing dictionary.
+func (rcv *DictionaryBatch) MutateIsDelta(n bool) bool {
+ return rcv._tab.MutateBoolSlot(8, n)
+}
+
+func DictionaryBatchStart(builder *flatbuffers.Builder) {
+ builder.StartObject(3)
+}
+func DictionaryBatchAddId(builder *flatbuffers.Builder, id int64) {
+ builder.PrependInt64Slot(0, id, 0)
+}
+func DictionaryBatchAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(data), 0)
+}
+func DictionaryBatchAddIsDelta(builder *flatbuffers.Builder, isDelta bool) {
+ builder.PrependBoolSlot(2, isDelta, false)
+}
+func DictionaryBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go
new file mode 100644
index 000000000..a9b09530b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type DictionaryEncoding struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDictionaryEncoding(buf []byte, offset flatbuffers.UOffsetT) *DictionaryEncoding {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &DictionaryEncoding{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *DictionaryEncoding) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *DictionaryEncoding) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// The known dictionary id in the application where this data is used. In
+/// the file or streaming formats, the dictionary ids are found in the
+/// DictionaryBatch messages
+func (rcv *DictionaryEncoding) Id() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// The known dictionary id in the application where this data is used. In
+/// the file or streaming formats, the dictionary ids are found in the
+/// DictionaryBatch messages
+func (rcv *DictionaryEncoding) MutateId(n int64) bool {
+ return rcv._tab.MutateInt64Slot(4, n)
+}
+
+/// The dictionary indices are constrained to be non-negative integers. If
+/// this field is null, the indices must be signed int32. To maximize
+/// cross-language compatibility and performance, implementations are
+/// recommended to prefer signed integer types over unsigned integer types
+/// and to avoid uint64 indices unless they are required by an application.
+func (rcv *DictionaryEncoding) IndexType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The dictionary indices are constrained to be non-negative integers. If
+/// this field is null, the indices must be signed int32. To maximize
+/// cross-language compatibility and performance, implementations are
+/// recommended to prefer signed integer types over unsigned integer types
+/// and to avoid uint64 indices unless they are required by an application.
+/// By default, dictionaries are not ordered, or the order does not have
+/// semantic meaning. In some statistical, applications, dictionary-encoding
+/// is used to represent ordered categorical data, and we provide a way to
+/// preserve that metadata here
+func (rcv *DictionaryEncoding) IsOrdered() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+/// By default, dictionaries are not ordered, or the order does not have
+/// semantic meaning. In some statistical, applications, dictionary-encoding
+/// is used to represent ordered categorical data, and we provide a way to
+/// preserve that metadata here
+func (rcv *DictionaryEncoding) MutateIsOrdered(n bool) bool {
+ return rcv._tab.MutateBoolSlot(8, n)
+}
+
+func (rcv *DictionaryEncoding) DictionaryKind() DictionaryKind {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return DictionaryKind(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *DictionaryEncoding) MutateDictionaryKind(n DictionaryKind) bool {
+ return rcv._tab.MutateInt16Slot(10, int16(n))
+}
+
+func DictionaryEncodingStart(builder *flatbuffers.Builder) {
+ builder.StartObject(4)
+}
+func DictionaryEncodingAddId(builder *flatbuffers.Builder, id int64) {
+ builder.PrependInt64Slot(0, id, 0)
+}
+func DictionaryEncodingAddIndexType(builder *flatbuffers.Builder, indexType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indexType), 0)
+}
+func DictionaryEncodingAddIsOrdered(builder *flatbuffers.Builder, isOrdered bool) {
+ builder.PrependBoolSlot(2, isOrdered, false)
+}
+func DictionaryEncodingAddDictionaryKind(builder *flatbuffers.Builder, dictionaryKind DictionaryKind) {
+ builder.PrependInt16Slot(3, int16(dictionaryKind), 0)
+}
+func DictionaryEncodingEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go
new file mode 100644
index 000000000..126ba5f7f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go
@@ -0,0 +1,47 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+/// ----------------------------------------------------------------------
+/// Dictionary encoding metadata
+/// Maintained for forwards compatibility, in the future
+/// Dictionaries might be explicit maps between integers and values
+/// allowing for non-contiguous index values
+type DictionaryKind int16
+
+const (
+ DictionaryKindDenseArray DictionaryKind = 0
+)
+
+var EnumNamesDictionaryKind = map[DictionaryKind]string{
+ DictionaryKindDenseArray: "DenseArray",
+}
+
+var EnumValuesDictionaryKind = map[string]DictionaryKind{
+ "DenseArray": DictionaryKindDenseArray,
+}
+
+func (v DictionaryKind) String() string {
+ if s, ok := EnumNamesDictionaryKind[v]; ok {
+ return s
+ }
+ return "DictionaryKind(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go
new file mode 100644
index 000000000..57b7b2a03
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Duration struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsDuration(buf []byte, offset flatbuffers.UOffsetT) *Duration {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Duration{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Duration) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Duration) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Duration) Unit() TimeUnit {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 1
+}
+
+func (rcv *Duration) MutateUnit(n TimeUnit) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func DurationStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func DurationAddUnit(builder *flatbuffers.Builder, unit TimeUnit) {
+ builder.PrependInt16Slot(0, int16(unit), 1)
+}
+func DurationEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go
new file mode 100644
index 000000000..cefa2ff9c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go
@@ -0,0 +1,47 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+/// ----------------------------------------------------------------------
+/// Endianness of the platform producing the data
+type Endianness int16
+
+const (
+ EndiannessLittle Endianness = 0
+ EndiannessBig Endianness = 1
+)
+
+var EnumNamesEndianness = map[Endianness]string{
+ EndiannessLittle: "Little",
+ EndiannessBig: "Big",
+}
+
+var EnumValuesEndianness = map[string]Endianness{
+ "Little": EndiannessLittle,
+ "Big": EndiannessBig,
+}
+
+func (v Endianness) String() string {
+ if s, ok := EnumNamesEndianness[v]; ok {
+ return s
+ }
+ return "Endianness(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go
new file mode 100644
index 000000000..ae5a0398b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+/// Represents Arrow Features that might not have full support
+/// within implementations. This is intended to be used in
+/// two scenarios:
+/// 1. A mechanism for readers of Arrow Streams
+/// and files to understand that the stream or file makes
+/// use of a feature that isn't supported or unknown to
+/// the implementation (and therefore can meet the Arrow
+/// forward compatibility guarantees).
+/// 2. A means of negotiating between a client and server
+/// what features a stream is allowed to use. The enums
+/// values here are intented to represent higher level
+/// features, additional details maybe negotiated
+/// with key-value pairs specific to the protocol.
+///
+/// Enums added to this list should be assigned power-of-two values
+/// to facilitate exchanging and comparing bitmaps for supported
+/// features.
+type Feature int64
+
+const (
+ /// Needed to make flatbuffers happy.
+ FeatureUNUSED Feature = 0
+ /// The stream makes use of multiple full dictionaries with the
+ /// same ID and assumes clients implement dictionary replacement
+ /// correctly.
+ FeatureDICTIONARY_REPLACEMENT Feature = 1
+ /// The stream makes use of compressed bodies as described
+ /// in Message.fbs.
+ FeatureCOMPRESSED_BODY Feature = 2
+)
+
+var EnumNamesFeature = map[Feature]string{
+ FeatureUNUSED: "UNUSED",
+ FeatureDICTIONARY_REPLACEMENT: "DICTIONARY_REPLACEMENT",
+ FeatureCOMPRESSED_BODY: "COMPRESSED_BODY",
+}
+
+var EnumValuesFeature = map[string]Feature{
+ "UNUSED": FeatureUNUSED,
+ "DICTIONARY_REPLACEMENT": FeatureDICTIONARY_REPLACEMENT,
+ "COMPRESSED_BODY": FeatureCOMPRESSED_BODY,
+}
+
+func (v Feature) String() string {
+ if s, ok := EnumNamesFeature[v]; ok {
+ return s
+ }
+ return "Feature(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go
new file mode 100644
index 000000000..c03cf2f87
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go
@@ -0,0 +1,188 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// A field represents a named column in a record / row batch or child of a
+/// nested type.
+type Field struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsField(buf []byte, offset flatbuffers.UOffsetT) *Field {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Field{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Field) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Field) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Name is not required, in i.e. a List
+func (rcv *Field) Name() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+/// Name is not required, in i.e. a List
+/// Whether or not this field can contain nulls. Should be true in general.
+func (rcv *Field) Nullable() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+/// Whether or not this field can contain nulls. Should be true in general.
+func (rcv *Field) MutateNullable(n bool) bool {
+ return rcv._tab.MutateBoolSlot(6, n)
+}
+
+func (rcv *Field) TypeType() Type {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return Type(rcv._tab.GetByte(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Field) MutateTypeType(n Type) bool {
+ return rcv._tab.MutateByteSlot(8, byte(n))
+}
+
+/// This is the type of the decoded value if the field is dictionary encoded.
+func (rcv *Field) Type(obj *flatbuffers.Table) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ rcv._tab.Union(obj, o)
+ return true
+ }
+ return false
+}
+
+/// This is the type of the decoded value if the field is dictionary encoded.
+/// Present only if the field is dictionary encoded.
+func (rcv *Field) Dictionary(obj *DictionaryEncoding) *DictionaryEncoding {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(DictionaryEncoding)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// Present only if the field is dictionary encoded.
+/// children apply only to nested data types like Struct, List and Union. For
+/// primitive types children will have length 0.
+func (rcv *Field) Children(obj *Field, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Field) ChildrenLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// children apply only to nested data types like Struct, List and Union. For
+/// primitive types children will have length 0.
+/// User-defined metadata
+func (rcv *Field) CustomMetadata(obj *KeyValue, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Field) CustomMetadataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// User-defined metadata
+func FieldStart(builder *flatbuffers.Builder) {
+ builder.StartObject(7)
+}
+func FieldAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(name), 0)
+}
+func FieldAddNullable(builder *flatbuffers.Builder, nullable bool) {
+ builder.PrependBoolSlot(1, nullable, false)
+}
+func FieldAddTypeType(builder *flatbuffers.Builder, typeType Type) {
+ builder.PrependByteSlot(2, byte(typeType), 0)
+}
+func FieldAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(type_), 0)
+}
+func FieldAddDictionary(builder *flatbuffers.Builder, dictionary flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(dictionary), 0)
+}
+func FieldAddChildren(builder *flatbuffers.Builder, children flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(children), 0)
+}
+func FieldStartChildrenVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func FieldAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(6, flatbuffers.UOffsetT(customMetadata), 0)
+}
+func FieldStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func FieldEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go
new file mode 100644
index 000000000..606b30bfe
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go
@@ -0,0 +1,76 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// Data structures for describing a table row batch (a collection of
+/// equal-length Arrow arrays)
+/// Metadata about a field at some level of a nested type tree (but not
+/// its children).
+///
+/// For example, a List<Int16> with values `[[1, 2, 3], null, [4], [5, 6], null]`
+/// would have {length: 5, null_count: 2} for its List node, and {length: 6,
+/// null_count: 0} for its Int16 node, as separate FieldNode structs
+type FieldNode struct {
+ _tab flatbuffers.Struct
+}
+
+func (rcv *FieldNode) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *FieldNode) Table() flatbuffers.Table {
+ return rcv._tab.Table
+}
+
+/// The number of value slots in the Arrow array at this level of a nested
+/// tree
+func (rcv *FieldNode) Length() int64 {
+ return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(0))
+}
+/// The number of value slots in the Arrow array at this level of a nested
+/// tree
+func (rcv *FieldNode) MutateLength(n int64) bool {
+ return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(0), n)
+}
+
+/// The number of observed nulls. Fields with null_count == 0 may choose not
+/// to write their physical validity bitmap out as a materialized buffer,
+/// instead setting the length of the bitmap buffer to 0.
+func (rcv *FieldNode) NullCount() int64 {
+ return rcv._tab.GetInt64(rcv._tab.Pos + flatbuffers.UOffsetT(8))
+}
+/// The number of observed nulls. Fields with null_count == 0 may choose not
+/// to write their physical validity bitmap out as a materialized buffer,
+/// instead setting the length of the bitmap buffer to 0.
+func (rcv *FieldNode) MutateNullCount(n int64) bool {
+ return rcv._tab.MutateInt64(rcv._tab.Pos+flatbuffers.UOffsetT(8), n)
+}
+
+func CreateFieldNode(builder *flatbuffers.Builder, length int64, nullCount int64) flatbuffers.UOffsetT {
+ builder.Prep(8, 16)
+ builder.PrependInt64(nullCount)
+ builder.PrependInt64(length)
+ return builder.Offset()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go
new file mode 100644
index 000000000..4e660d507
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type FixedSizeBinary struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsFixedSizeBinary(buf []byte, offset flatbuffers.UOffsetT) *FixedSizeBinary {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &FixedSizeBinary{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *FixedSizeBinary) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *FixedSizeBinary) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Number of bytes per value
+func (rcv *FixedSizeBinary) ByteWidth() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// Number of bytes per value
+func (rcv *FixedSizeBinary) MutateByteWidth(n int32) bool {
+ return rcv._tab.MutateInt32Slot(4, n)
+}
+
+func FixedSizeBinaryStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func FixedSizeBinaryAddByteWidth(builder *flatbuffers.Builder, byteWidth int32) {
+ builder.PrependInt32Slot(0, byteWidth, 0)
+}
+func FixedSizeBinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go
new file mode 100644
index 000000000..dabf5cc85
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type FixedSizeList struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsFixedSizeList(buf []byte, offset flatbuffers.UOffsetT) *FixedSizeList {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &FixedSizeList{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *FixedSizeList) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *FixedSizeList) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Number of list items per value
+func (rcv *FixedSizeList) ListSize() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// Number of list items per value
+func (rcv *FixedSizeList) MutateListSize(n int32) bool {
+ return rcv._tab.MutateInt32Slot(4, n)
+}
+
+func FixedSizeListStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func FixedSizeListAddListSize(builder *flatbuffers.Builder, listSize int32) {
+ builder.PrependInt32Slot(0, listSize, 0)
+}
+func FixedSizeListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go
new file mode 100644
index 000000000..241d448dc
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type FloatingPoint struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsFloatingPoint(buf []byte, offset flatbuffers.UOffsetT) *FloatingPoint {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &FloatingPoint{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *FloatingPoint) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *FloatingPoint) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *FloatingPoint) Precision() Precision {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return Precision(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *FloatingPoint) MutatePrecision(n Precision) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func FloatingPointStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func FloatingPointAddPrecision(builder *flatbuffers.Builder, precision Precision) {
+ builder.PrependInt16Slot(0, int16(precision), 0)
+}
+func FloatingPointEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go
new file mode 100644
index 000000000..65b0ff095
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go
@@ -0,0 +1,162 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// Arrow File metadata
+///
+type Footer struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsFooter(buf []byte, offset flatbuffers.UOffsetT) *Footer {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Footer{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Footer) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Footer) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Footer) Version() MetadataVersion {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return MetadataVersion(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Footer) MutateVersion(n MetadataVersion) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func (rcv *Footer) Schema(obj *Schema) *Schema {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Schema)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+func (rcv *Footer) Dictionaries(obj *Block, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 24
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Footer) DictionariesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Footer) RecordBatches(obj *Block, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 24
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Footer) RecordBatchesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// User-defined metadata
+func (rcv *Footer) CustomMetadata(obj *KeyValue, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Footer) CustomMetadataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// User-defined metadata
+func FooterStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func FooterAddVersion(builder *flatbuffers.Builder, version MetadataVersion) {
+ builder.PrependInt16Slot(0, int16(version), 0)
+}
+func FooterAddSchema(builder *flatbuffers.Builder, schema flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(schema), 0)
+}
+func FooterAddDictionaries(builder *flatbuffers.Builder, dictionaries flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(dictionaries), 0)
+}
+func FooterStartDictionariesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(24, numElems, 8)
+}
+func FooterAddRecordBatches(builder *flatbuffers.Builder, recordBatches flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(recordBatches), 0)
+}
+func FooterStartRecordBatchesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(24, numElems, 8)
+}
+func FooterAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(customMetadata), 0)
+}
+func FooterStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func FooterEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go
new file mode 100644
index 000000000..9f4b19117
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Int struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsInt(buf []byte, offset flatbuffers.UOffsetT) *Int {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Int{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Int) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Int) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Int) BitWidth() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Int) MutateBitWidth(n int32) bool {
+ return rcv._tab.MutateInt32Slot(4, n)
+}
+
+func (rcv *Int) IsSigned() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+func (rcv *Int) MutateIsSigned(n bool) bool {
+ return rcv._tab.MutateBoolSlot(6, n)
+}
+
+func IntStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func IntAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) {
+ builder.PrependInt32Slot(0, bitWidth, 0)
+}
+func IntAddIsSigned(builder *flatbuffers.Builder, isSigned bool) {
+ builder.PrependBoolSlot(1, isSigned, false)
+}
+func IntEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go
new file mode 100644
index 000000000..12c56d5c2
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Interval struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsInterval(buf []byte, offset flatbuffers.UOffsetT) *Interval {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Interval{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Interval) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Interval) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Interval) Unit() IntervalUnit {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return IntervalUnit(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Interval) MutateUnit(n IntervalUnit) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func IntervalStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func IntervalAddUnit(builder *flatbuffers.Builder, unit IntervalUnit) {
+ builder.PrependInt16Slot(0, int16(unit), 0)
+}
+func IntervalEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go
new file mode 100644
index 000000000..f3ed1ae7b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type IntervalUnit int16
+
+const (
+ IntervalUnitYEAR_MONTH IntervalUnit = 0
+ IntervalUnitDAY_TIME IntervalUnit = 1
+ IntervalUnitMONTH_DAY_NANO IntervalUnit = 2
+)
+
+var EnumNamesIntervalUnit = map[IntervalUnit]string{
+ IntervalUnitYEAR_MONTH: "YEAR_MONTH",
+ IntervalUnitDAY_TIME: "DAY_TIME",
+ IntervalUnitMONTH_DAY_NANO: "MONTH_DAY_NANO",
+}
+
+var EnumValuesIntervalUnit = map[string]IntervalUnit{
+ "YEAR_MONTH": IntervalUnitYEAR_MONTH,
+ "DAY_TIME": IntervalUnitDAY_TIME,
+ "MONTH_DAY_NANO": IntervalUnitMONTH_DAY_NANO,
+}
+
+func (v IntervalUnit) String() string {
+ if s, ok := EnumNamesIntervalUnit[v]; ok {
+ return s
+ }
+ return "IntervalUnit(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go
new file mode 100644
index 000000000..c1b85318e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go
@@ -0,0 +1,75 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// user defined key value pairs to add custom metadata to arrow
+/// key namespacing is the responsibility of the user
+type KeyValue struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsKeyValue(buf []byte, offset flatbuffers.UOffsetT) *KeyValue {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &KeyValue{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *KeyValue) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *KeyValue) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *KeyValue) Key() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func (rcv *KeyValue) Value() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+func KeyValueStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func KeyValueAddKey(builder *flatbuffers.Builder, key flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(key), 0)
+}
+func KeyValueAddValue(builder *flatbuffers.Builder, value flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(value), 0)
+}
+func KeyValueEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go
new file mode 100644
index 000000000..2c3befcc1
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Same as Binary, but with 64-bit offsets, allowing to represent
+/// extremely large data values.
+type LargeBinary struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsLargeBinary(buf []byte, offset flatbuffers.UOffsetT) *LargeBinary {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &LargeBinary{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *LargeBinary) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *LargeBinary) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func LargeBinaryStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func LargeBinaryEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go
new file mode 100644
index 000000000..92f228458
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Same as List, but with 64-bit offsets, allowing to represent
+/// extremely large data values.
+type LargeList struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsLargeList(buf []byte, offset flatbuffers.UOffsetT) *LargeList {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &LargeList{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *LargeList) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *LargeList) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func LargeListStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func LargeListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go
new file mode 100644
index 000000000..5b1df149c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Same as ListView, but with 64-bit offsets and sizes, allowing to represent
+/// extremely large data values.
+type LargeListView struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsLargeListView(buf []byte, offset flatbuffers.UOffsetT) *LargeListView {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &LargeListView{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *LargeListView) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *LargeListView) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func LargeListViewStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func LargeListViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go
new file mode 100644
index 000000000..e78b33e11
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Same as Utf8, but with 64-bit offsets, allowing to represent
+/// extremely large data values.
+type LargeUtf8 struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsLargeUtf8(buf []byte, offset flatbuffers.UOffsetT) *LargeUtf8 {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &LargeUtf8{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *LargeUtf8) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *LargeUtf8) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func LargeUtf8Start(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func LargeUtf8End(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go
new file mode 100644
index 000000000..ba84319d3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type List struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsList(buf []byte, offset flatbuffers.UOffsetT) *List {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &List{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *List) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *List) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func ListStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func ListEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go
new file mode 100644
index 000000000..46b1e0b3c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Represents the same logical types that List can, but contains offsets and
+/// sizes allowing for writes in any order and sharing of child values among
+/// list values.
+type ListView struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsListView(buf []byte, offset flatbuffers.UOffsetT) *ListView {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &ListView{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *ListView) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *ListView) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func ListViewStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func ListViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go
new file mode 100644
index 000000000..8802aba1e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// A Map is a logical nested type that is represented as
+///
+/// List<entries: Struct<key: K, value: V>>
+///
+/// In this layout, the keys and values are each respectively contiguous. We do
+/// not constrain the key and value types, so the application is responsible
+/// for ensuring that the keys are hashable and unique. Whether the keys are sorted
+/// may be set in the metadata for this field.
+///
+/// In a field with Map type, the field has a child Struct field, which then
+/// has two children: key type and the second the value type. The names of the
+/// child fields may be respectively "entries", "key", and "value", but this is
+/// not enforced.
+///
+/// Map
+/// ```text
+/// - child[0] entries: Struct
+/// - child[0] key: K
+/// - child[1] value: V
+/// ```
+/// Neither the "entries" field nor the "key" field may be nullable.
+///
+/// The metadata is structured so that Arrow systems without special handling
+/// for Map can make Map an alias for List. The "layout" attribute for the Map
+/// field must have the same contents as a List.
+type Map struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsMap(buf []byte, offset flatbuffers.UOffsetT) *Map {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Map{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Map) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Map) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Set to true if the keys within each value are sorted
+func (rcv *Map) KeysSorted() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+/// Set to true if the keys within each value are sorted
+func (rcv *Map) MutateKeysSorted(n bool) bool {
+ return rcv._tab.MutateBoolSlot(4, n)
+}
+
+func MapStart(builder *flatbuffers.Builder) {
+ builder.StartObject(1)
+}
+func MapAddKeysSorted(builder *flatbuffers.Builder, keysSorted bool) {
+ builder.PrependBoolSlot(0, keysSorted, false)
+}
+func MapEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go
new file mode 100644
index 000000000..f4b4a0ff8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go
@@ -0,0 +1,133 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Message struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsMessage(buf []byte, offset flatbuffers.UOffsetT) *Message {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Message{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Message) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Message) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Message) Version() MetadataVersion {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return MetadataVersion(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Message) MutateVersion(n MetadataVersion) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func (rcv *Message) HeaderType() MessageHeader {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return MessageHeader(rcv._tab.GetByte(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Message) MutateHeaderType(n MessageHeader) bool {
+ return rcv._tab.MutateByteSlot(6, byte(n))
+}
+
+func (rcv *Message) Header(obj *flatbuffers.Table) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ rcv._tab.Union(obj, o)
+ return true
+ }
+ return false
+}
+
+func (rcv *Message) BodyLength() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+func (rcv *Message) MutateBodyLength(n int64) bool {
+ return rcv._tab.MutateInt64Slot(10, n)
+}
+
+func (rcv *Message) CustomMetadata(obj *KeyValue, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Message) CustomMetadataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func MessageStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func MessageAddVersion(builder *flatbuffers.Builder, version MetadataVersion) {
+ builder.PrependInt16Slot(0, int16(version), 0)
+}
+func MessageAddHeaderType(builder *flatbuffers.Builder, headerType MessageHeader) {
+ builder.PrependByteSlot(1, byte(headerType), 0)
+}
+func MessageAddHeader(builder *flatbuffers.Builder, header flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(header), 0)
+}
+func MessageAddBodyLength(builder *flatbuffers.Builder, bodyLength int64) {
+ builder.PrependInt64Slot(3, bodyLength, 0)
+}
+func MessageAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(customMetadata), 0)
+}
+func MessageStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func MessageEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go
new file mode 100644
index 000000000..c12fc1058
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+/// ----------------------------------------------------------------------
+/// The root Message type
+/// This union enables us to easily send different message types without
+/// redundant storage, and in the future we can easily add new message types.
+///
+/// Arrow implementations do not need to implement all of the message types,
+/// which may include experimental metadata types. For maximum compatibility,
+/// it is best to send data using RecordBatch
+type MessageHeader byte
+
+const (
+ MessageHeaderNONE MessageHeader = 0
+ MessageHeaderSchema MessageHeader = 1
+ MessageHeaderDictionaryBatch MessageHeader = 2
+ MessageHeaderRecordBatch MessageHeader = 3
+ MessageHeaderTensor MessageHeader = 4
+ MessageHeaderSparseTensor MessageHeader = 5
+)
+
+var EnumNamesMessageHeader = map[MessageHeader]string{
+ MessageHeaderNONE: "NONE",
+ MessageHeaderSchema: "Schema",
+ MessageHeaderDictionaryBatch: "DictionaryBatch",
+ MessageHeaderRecordBatch: "RecordBatch",
+ MessageHeaderTensor: "Tensor",
+ MessageHeaderSparseTensor: "SparseTensor",
+}
+
+var EnumValuesMessageHeader = map[string]MessageHeader{
+ "NONE": MessageHeaderNONE,
+ "Schema": MessageHeaderSchema,
+ "DictionaryBatch": MessageHeaderDictionaryBatch,
+ "RecordBatch": MessageHeaderRecordBatch,
+ "Tensor": MessageHeaderTensor,
+ "SparseTensor": MessageHeaderSparseTensor,
+}
+
+func (v MessageHeader) String() string {
+ if s, ok := EnumNamesMessageHeader[v]; ok {
+ return s
+ }
+ return "MessageHeader(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go
new file mode 100644
index 000000000..21b234f9c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type MetadataVersion int16
+
+const (
+ /// 0.1.0 (October 2016).
+ MetadataVersionV1 MetadataVersion = 0
+ /// 0.2.0 (February 2017). Non-backwards compatible with V1.
+ MetadataVersionV2 MetadataVersion = 1
+ /// 0.3.0 -> 0.7.1 (May - December 2017). Non-backwards compatible with V2.
+ MetadataVersionV3 MetadataVersion = 2
+ /// >= 0.8.0 (December 2017). Non-backwards compatible with V3.
+ MetadataVersionV4 MetadataVersion = 3
+ /// >= 1.0.0 (July 2020. Backwards compatible with V4 (V5 readers can read V4
+ /// metadata and IPC messages). Implementations are recommended to provide a
+ /// V4 compatibility mode with V5 format changes disabled.
+ ///
+ /// Incompatible changes between V4 and V5:
+ /// - Union buffer layout has changed. In V5, Unions don't have a validity
+ /// bitmap buffer.
+ MetadataVersionV5 MetadataVersion = 4
+)
+
+var EnumNamesMetadataVersion = map[MetadataVersion]string{
+ MetadataVersionV1: "V1",
+ MetadataVersionV2: "V2",
+ MetadataVersionV3: "V3",
+ MetadataVersionV4: "V4",
+ MetadataVersionV5: "V5",
+}
+
+var EnumValuesMetadataVersion = map[string]MetadataVersion{
+ "V1": MetadataVersionV1,
+ "V2": MetadataVersionV2,
+ "V3": MetadataVersionV3,
+ "V4": MetadataVersionV4,
+ "V5": MetadataVersionV5,
+}
+
+func (v MetadataVersion) String() string {
+ if s, ok := EnumNamesMetadataVersion[v]; ok {
+ return s
+ }
+ return "MetadataVersion(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go
new file mode 100644
index 000000000..3c3eb4bda
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// These are stored in the flatbuffer in the Type union below
+type Null struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsNull(buf []byte, offset flatbuffers.UOffsetT) *Null {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Null{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Null) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Null) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func NullStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func NullEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go
new file mode 100644
index 000000000..d8021ccc4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type Precision int16
+
+const (
+ PrecisionHALF Precision = 0
+ PrecisionSINGLE Precision = 1
+ PrecisionDOUBLE Precision = 2
+)
+
+var EnumNamesPrecision = map[Precision]string{
+ PrecisionHALF: "HALF",
+ PrecisionSINGLE: "SINGLE",
+ PrecisionDOUBLE: "DOUBLE",
+}
+
+var EnumValuesPrecision = map[string]Precision{
+ "HALF": PrecisionHALF,
+ "SINGLE": PrecisionSINGLE,
+ "DOUBLE": PrecisionDOUBLE,
+}
+
+func (v Precision) String() string {
+ if s, ok := EnumNamesPrecision[v]; ok {
+ return s
+ }
+ return "Precision(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go
new file mode 100644
index 000000000..c50f4a6e8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go
@@ -0,0 +1,214 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// A data header describing the shared memory layout of a "record" or "row"
+/// batch. Some systems call this a "row batch" internally and others a "record
+/// batch".
+type RecordBatch struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsRecordBatch(buf []byte, offset flatbuffers.UOffsetT) *RecordBatch {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &RecordBatch{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *RecordBatch) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *RecordBatch) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// number of records / rows. The arrays in the batch should all have this
+/// length
+func (rcv *RecordBatch) Length() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// number of records / rows. The arrays in the batch should all have this
+/// length
+func (rcv *RecordBatch) MutateLength(n int64) bool {
+ return rcv._tab.MutateInt64Slot(4, n)
+}
+
+/// Nodes correspond to the pre-ordered flattened logical schema
+func (rcv *RecordBatch) Nodes(obj *FieldNode, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 16
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *RecordBatch) NodesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Nodes correspond to the pre-ordered flattened logical schema
+/// Buffers correspond to the pre-ordered flattened buffer tree
+///
+/// The number of buffers appended to this list depends on the schema. For
+/// example, most primitive arrays will have 2 buffers, 1 for the validity
+/// bitmap and 1 for the values. For struct arrays, there will only be a
+/// single buffer for the validity (nulls) bitmap
+func (rcv *RecordBatch) Buffers(obj *Buffer, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 16
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *RecordBatch) BuffersLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Buffers correspond to the pre-ordered flattened buffer tree
+///
+/// The number of buffers appended to this list depends on the schema. For
+/// example, most primitive arrays will have 2 buffers, 1 for the validity
+/// bitmap and 1 for the values. For struct arrays, there will only be a
+/// single buffer for the validity (nulls) bitmap
+/// Optional compression of the message body
+func (rcv *RecordBatch) Compression(obj *BodyCompression) *BodyCompression {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(BodyCompression)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// Optional compression of the message body
+/// Some types such as Utf8View are represented using a variable number of buffers.
+/// For each such Field in the pre-ordered flattened logical schema, there will be
+/// an entry in variadicBufferCounts to indicate the number of number of variadic
+/// buffers which belong to that Field in the current RecordBatch.
+///
+/// For example, the schema
+/// col1: Struct<alpha: Int32, beta: BinaryView, gamma: Float64>
+/// col2: Utf8View
+/// contains two Fields with variadic buffers so variadicBufferCounts will have
+/// two entries, the first counting the variadic buffers of `col1.beta` and the
+/// second counting `col2`'s.
+///
+/// This field may be omitted if and only if the schema contains no Fields with
+/// a variable number of buffers, such as BinaryView and Utf8View.
+func (rcv *RecordBatch) VariadicBufferCounts(j int) int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8))
+ }
+ return 0
+}
+
+func (rcv *RecordBatch) VariadicBufferCountsLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Some types such as Utf8View are represented using a variable number of buffers.
+/// For each such Field in the pre-ordered flattened logical schema, there will be
+/// an entry in variadicBufferCounts to indicate the number of number of variadic
+/// buffers which belong to that Field in the current RecordBatch.
+///
+/// For example, the schema
+/// col1: Struct<alpha: Int32, beta: BinaryView, gamma: Float64>
+/// col2: Utf8View
+/// contains two Fields with variadic buffers so variadicBufferCounts will have
+/// two entries, the first counting the variadic buffers of `col1.beta` and the
+/// second counting `col2`'s.
+///
+/// This field may be omitted if and only if the schema contains no Fields with
+/// a variable number of buffers, such as BinaryView and Utf8View.
+func (rcv *RecordBatch) MutateVariadicBufferCounts(j int, n int64) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n)
+ }
+ return false
+}
+
+func RecordBatchStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func RecordBatchAddLength(builder *flatbuffers.Builder, length int64) {
+ builder.PrependInt64Slot(0, length, 0)
+}
+func RecordBatchAddNodes(builder *flatbuffers.Builder, nodes flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(nodes), 0)
+}
+func RecordBatchStartNodesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(16, numElems, 8)
+}
+func RecordBatchAddBuffers(builder *flatbuffers.Builder, buffers flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(buffers), 0)
+}
+func RecordBatchStartBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(16, numElems, 8)
+}
+func RecordBatchAddCompression(builder *flatbuffers.Builder, compression flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(compression), 0)
+}
+func RecordBatchAddVariadicBufferCounts(builder *flatbuffers.Builder, variadicBufferCounts flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(variadicBufferCounts), 0)
+}
+func RecordBatchStartVariadicBufferCountsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
+func RecordBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go
new file mode 100644
index 000000000..fa414c1bf
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Contains two child arrays, run_ends and values.
+/// The run_ends child array must be a 16/32/64-bit integer array
+/// which encodes the indices at which the run with the value in
+/// each corresponding index in the values child array ends.
+/// Like list/struct types, the value array can be of any type.
+type RunEndEncoded struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsRunEndEncoded(buf []byte, offset flatbuffers.UOffsetT) *RunEndEncoded {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &RunEndEncoded{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *RunEndEncoded) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *RunEndEncoded) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func RunEndEncodedStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func RunEndEncodedEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go
new file mode 100644
index 000000000..8822c0660
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go
@@ -0,0 +1,50 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type RunLengthEncoded struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsRunLengthEncoded(buf []byte, offset flatbuffers.UOffsetT) *RunLengthEncoded {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &RunLengthEncoded{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *RunLengthEncoded) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *RunLengthEncoded) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func RunLengthEncodedStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func RunLengthEncodedEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go
new file mode 100644
index 000000000..4ee5ecc9e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go
@@ -0,0 +1,159 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// A Schema describes the columns in a row batch
+type Schema struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSchema(buf []byte, offset flatbuffers.UOffsetT) *Schema {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Schema{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Schema) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Schema) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// endianness of the buffer
+/// it is Little Endian by default
+/// if endianness doesn't match the underlying system then the vectors need to be converted
+func (rcv *Schema) Endianness() Endianness {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return Endianness(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+/// endianness of the buffer
+/// it is Little Endian by default
+/// if endianness doesn't match the underlying system then the vectors need to be converted
+func (rcv *Schema) MutateEndianness(n Endianness) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func (rcv *Schema) Fields(obj *Field, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Schema) FieldsLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Schema) CustomMetadata(obj *KeyValue, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Schema) CustomMetadataLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Features used in the stream/file.
+func (rcv *Schema) Features(j int) Feature {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return Feature(rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)))
+ }
+ return 0
+}
+
+func (rcv *Schema) FeaturesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Features used in the stream/file.
+func (rcv *Schema) MutateFeatures(j int, n Feature) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), int64(n))
+ }
+ return false
+}
+
+func SchemaStart(builder *flatbuffers.Builder) {
+ builder.StartObject(4)
+}
+func SchemaAddEndianness(builder *flatbuffers.Builder, endianness Endianness) {
+ builder.PrependInt16Slot(0, int16(endianness), 0)
+}
+func SchemaAddFields(builder *flatbuffers.Builder, fields flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(fields), 0)
+}
+func SchemaStartFieldsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func SchemaAddCustomMetadata(builder *flatbuffers.Builder, customMetadata flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(customMetadata), 0)
+}
+func SchemaStartCustomMetadataVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func SchemaAddFeatures(builder *flatbuffers.Builder, features flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(features), 0)
+}
+func SchemaStartFeaturesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
+func SchemaEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go
new file mode 100644
index 000000000..2d86fdef7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type SparseMatrixCompressedAxis int16
+
+const (
+ SparseMatrixCompressedAxisRow SparseMatrixCompressedAxis = 0
+ SparseMatrixCompressedAxisColumn SparseMatrixCompressedAxis = 1
+)
+
+var EnumNamesSparseMatrixCompressedAxis = map[SparseMatrixCompressedAxis]string{
+ SparseMatrixCompressedAxisRow: "Row",
+ SparseMatrixCompressedAxisColumn: "Column",
+}
+
+var EnumValuesSparseMatrixCompressedAxis = map[string]SparseMatrixCompressedAxis{
+ "Row": SparseMatrixCompressedAxisRow,
+ "Column": SparseMatrixCompressedAxisColumn,
+}
+
+func (v SparseMatrixCompressedAxis) String() string {
+ if s, ok := EnumNamesSparseMatrixCompressedAxis[v]; ok {
+ return s
+ }
+ return "SparseMatrixCompressedAxis(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go
new file mode 100644
index 000000000..de8217650
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go
@@ -0,0 +1,181 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Compressed Sparse Row format, that is matrix-specific.
+type SparseMatrixIndexCSR struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSparseMatrixIndexCSR(buf []byte, offset flatbuffers.UOffsetT) *SparseMatrixIndexCSR {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SparseMatrixIndexCSR{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *SparseMatrixIndexCSR) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SparseMatrixIndexCSR) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// The type of values in indptrBuffer
+func (rcv *SparseMatrixIndexCSR) IndptrType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The type of values in indptrBuffer
+/// indptrBuffer stores the location and size of indptr array that
+/// represents the range of the rows.
+/// The i-th row spans from indptr[i] to indptr[i+1] in the data.
+/// The length of this array is 1 + (the number of rows), and the type
+/// of index value is long.
+///
+/// For example, let X be the following 6x4 matrix:
+///
+/// X := [[0, 1, 2, 0],
+/// [0, 0, 3, 0],
+/// [0, 4, 0, 5],
+/// [0, 0, 0, 0],
+/// [6, 0, 7, 8],
+/// [0, 9, 0, 0]].
+///
+/// The array of non-zero values in X is:
+///
+/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9].
+///
+/// And the indptr of X is:
+///
+/// indptr(X) = [0, 2, 3, 5, 5, 8, 10].
+func (rcv *SparseMatrixIndexCSR) IndptrBuffer(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// indptrBuffer stores the location and size of indptr array that
+/// represents the range of the rows.
+/// The i-th row spans from indptr[i] to indptr[i+1] in the data.
+/// The length of this array is 1 + (the number of rows), and the type
+/// of index value is long.
+///
+/// For example, let X be the following 6x4 matrix:
+///
+/// X := [[0, 1, 2, 0],
+/// [0, 0, 3, 0],
+/// [0, 4, 0, 5],
+/// [0, 0, 0, 0],
+/// [6, 0, 7, 8],
+/// [0, 9, 0, 0]].
+///
+/// The array of non-zero values in X is:
+///
+/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9].
+///
+/// And the indptr of X is:
+///
+/// indptr(X) = [0, 2, 3, 5, 5, 8, 10].
+/// The type of values in indicesBuffer
+func (rcv *SparseMatrixIndexCSR) IndicesType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The type of values in indicesBuffer
+/// indicesBuffer stores the location and size of the array that
+/// contains the column indices of the corresponding non-zero values.
+/// The type of index value is long.
+///
+/// For example, the indices of the above X is:
+///
+/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1].
+///
+/// Note that the indices are sorted in lexicographical order for each row.
+func (rcv *SparseMatrixIndexCSR) IndicesBuffer(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// indicesBuffer stores the location and size of the array that
+/// contains the column indices of the corresponding non-zero values.
+/// The type of index value is long.
+///
+/// For example, the indices of the above X is:
+///
+/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1].
+///
+/// Note that the indices are sorted in lexicographical order for each row.
+func SparseMatrixIndexCSRStart(builder *flatbuffers.Builder) {
+ builder.StartObject(4)
+}
+func SparseMatrixIndexCSRAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indptrType), 0)
+}
+func SparseMatrixIndexCSRAddIndptrBuffer(builder *flatbuffers.Builder, indptrBuffer flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(1, flatbuffers.UOffsetT(indptrBuffer), 0)
+}
+func SparseMatrixIndexCSRAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(indicesType), 0)
+}
+func SparseMatrixIndexCSRAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(3, flatbuffers.UOffsetT(indicesBuffer), 0)
+}
+func SparseMatrixIndexCSREnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go
new file mode 100644
index 000000000..c28cc5d08
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go
@@ -0,0 +1,200 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Compressed Sparse format, that is matrix-specific.
+type SparseMatrixIndexCSX struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSparseMatrixIndexCSX(buf []byte, offset flatbuffers.UOffsetT) *SparseMatrixIndexCSX {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SparseMatrixIndexCSX{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *SparseMatrixIndexCSX) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SparseMatrixIndexCSX) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Which axis, row or column, is compressed
+func (rcv *SparseMatrixIndexCSX) CompressedAxis() SparseMatrixCompressedAxis {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return SparseMatrixCompressedAxis(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+/// Which axis, row or column, is compressed
+func (rcv *SparseMatrixIndexCSX) MutateCompressedAxis(n SparseMatrixCompressedAxis) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+/// The type of values in indptrBuffer
+func (rcv *SparseMatrixIndexCSX) IndptrType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The type of values in indptrBuffer
+/// indptrBuffer stores the location and size of indptr array that
+/// represents the range of the rows.
+/// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data.
+/// The length of this array is 1 + (the number of rows), and the type
+/// of index value is long.
+///
+/// For example, let X be the following 6x4 matrix:
+/// ```text
+/// X := [[0, 1, 2, 0],
+/// [0, 0, 3, 0],
+/// [0, 4, 0, 5],
+/// [0, 0, 0, 0],
+/// [6, 0, 7, 8],
+/// [0, 9, 0, 0]].
+/// ```
+/// The array of non-zero values in X is:
+/// ```text
+/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9].
+/// ```
+/// And the indptr of X is:
+/// ```text
+/// indptr(X) = [0, 2, 3, 5, 5, 8, 10].
+/// ```
+func (rcv *SparseMatrixIndexCSX) IndptrBuffer(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// indptrBuffer stores the location and size of indptr array that
+/// represents the range of the rows.
+/// The i-th row spans from `indptr[i]` to `indptr[i+1]` in the data.
+/// The length of this array is 1 + (the number of rows), and the type
+/// of index value is long.
+///
+/// For example, let X be the following 6x4 matrix:
+/// ```text
+/// X := [[0, 1, 2, 0],
+/// [0, 0, 3, 0],
+/// [0, 4, 0, 5],
+/// [0, 0, 0, 0],
+/// [6, 0, 7, 8],
+/// [0, 9, 0, 0]].
+/// ```
+/// The array of non-zero values in X is:
+/// ```text
+/// values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9].
+/// ```
+/// And the indptr of X is:
+/// ```text
+/// indptr(X) = [0, 2, 3, 5, 5, 8, 10].
+/// ```
+/// The type of values in indicesBuffer
+func (rcv *SparseMatrixIndexCSX) IndicesType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The type of values in indicesBuffer
+/// indicesBuffer stores the location and size of the array that
+/// contains the column indices of the corresponding non-zero values.
+/// The type of index value is long.
+///
+/// For example, the indices of the above X is:
+/// ```text
+/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1].
+/// ```
+/// Note that the indices are sorted in lexicographical order for each row.
+func (rcv *SparseMatrixIndexCSX) IndicesBuffer(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// indicesBuffer stores the location and size of the array that
+/// contains the column indices of the corresponding non-zero values.
+/// The type of index value is long.
+///
+/// For example, the indices of the above X is:
+/// ```text
+/// indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1].
+/// ```
+/// Note that the indices are sorted in lexicographical order for each row.
+func SparseMatrixIndexCSXStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func SparseMatrixIndexCSXAddCompressedAxis(builder *flatbuffers.Builder, compressedAxis SparseMatrixCompressedAxis) {
+ builder.PrependInt16Slot(0, int16(compressedAxis), 0)
+}
+func SparseMatrixIndexCSXAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indptrType), 0)
+}
+func SparseMatrixIndexCSXAddIndptrBuffer(builder *flatbuffers.Builder, indptrBuffer flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(2, flatbuffers.UOffsetT(indptrBuffer), 0)
+}
+func SparseMatrixIndexCSXAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(indicesType), 0)
+}
+func SparseMatrixIndexCSXAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(4, flatbuffers.UOffsetT(indicesBuffer), 0)
+}
+func SparseMatrixIndexCSXEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go
new file mode 100644
index 000000000..6f3f55797
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go
@@ -0,0 +1,175 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type SparseTensor struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSparseTensor(buf []byte, offset flatbuffers.UOffsetT) *SparseTensor {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SparseTensor{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *SparseTensor) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SparseTensor) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *SparseTensor) TypeType() Type {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return Type(rcv._tab.GetByte(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *SparseTensor) MutateTypeType(n Type) bool {
+ return rcv._tab.MutateByteSlot(4, byte(n))
+}
+
+/// The type of data contained in a value cell.
+/// Currently only fixed-width value types are supported,
+/// no strings or nested types.
+func (rcv *SparseTensor) Type(obj *flatbuffers.Table) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ rcv._tab.Union(obj, o)
+ return true
+ }
+ return false
+}
+
+/// The type of data contained in a value cell.
+/// Currently only fixed-width value types are supported,
+/// no strings or nested types.
+/// The dimensions of the tensor, optionally named.
+func (rcv *SparseTensor) Shape(obj *TensorDim, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *SparseTensor) ShapeLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// The dimensions of the tensor, optionally named.
+/// The number of non-zero values in a sparse tensor.
+func (rcv *SparseTensor) NonZeroLength() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// The number of non-zero values in a sparse tensor.
+func (rcv *SparseTensor) MutateNonZeroLength(n int64) bool {
+ return rcv._tab.MutateInt64Slot(10, n)
+}
+
+func (rcv *SparseTensor) SparseIndexType() SparseTensorIndex {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return SparseTensorIndex(rcv._tab.GetByte(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *SparseTensor) MutateSparseIndexType(n SparseTensorIndex) bool {
+ return rcv._tab.MutateByteSlot(12, byte(n))
+}
+
+/// Sparse tensor index
+func (rcv *SparseTensor) SparseIndex(obj *flatbuffers.Table) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(14))
+ if o != 0 {
+ rcv._tab.Union(obj, o)
+ return true
+ }
+ return false
+}
+
+/// Sparse tensor index
+/// The location and size of the tensor's data
+func (rcv *SparseTensor) Data(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(16))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The location and size of the tensor's data
+func SparseTensorStart(builder *flatbuffers.Builder) {
+ builder.StartObject(7)
+}
+func SparseTensorAddTypeType(builder *flatbuffers.Builder, typeType Type) {
+ builder.PrependByteSlot(0, byte(typeType), 0)
+}
+func SparseTensorAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0)
+}
+func SparseTensorAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(shape), 0)
+}
+func SparseTensorStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func SparseTensorAddNonZeroLength(builder *flatbuffers.Builder, nonZeroLength int64) {
+ builder.PrependInt64Slot(3, nonZeroLength, 0)
+}
+func SparseTensorAddSparseIndexType(builder *flatbuffers.Builder, sparseIndexType SparseTensorIndex) {
+ builder.PrependByteSlot(4, byte(sparseIndexType), 0)
+}
+func SparseTensorAddSparseIndex(builder *flatbuffers.Builder, sparseIndex flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(5, flatbuffers.UOffsetT(sparseIndex), 0)
+}
+func SparseTensorAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(6, flatbuffers.UOffsetT(data), 0)
+}
+func SparseTensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go
new file mode 100644
index 000000000..42aa818b0
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type SparseTensorIndex byte
+
+const (
+ SparseTensorIndexNONE SparseTensorIndex = 0
+ SparseTensorIndexSparseTensorIndexCOO SparseTensorIndex = 1
+ SparseTensorIndexSparseMatrixIndexCSX SparseTensorIndex = 2
+ SparseTensorIndexSparseTensorIndexCSF SparseTensorIndex = 3
+)
+
+var EnumNamesSparseTensorIndex = map[SparseTensorIndex]string{
+ SparseTensorIndexNONE: "NONE",
+ SparseTensorIndexSparseTensorIndexCOO: "SparseTensorIndexCOO",
+ SparseTensorIndexSparseMatrixIndexCSX: "SparseMatrixIndexCSX",
+ SparseTensorIndexSparseTensorIndexCSF: "SparseTensorIndexCSF",
+}
+
+var EnumValuesSparseTensorIndex = map[string]SparseTensorIndex{
+ "NONE": SparseTensorIndexNONE,
+ "SparseTensorIndexCOO": SparseTensorIndexSparseTensorIndexCOO,
+ "SparseMatrixIndexCSX": SparseTensorIndexSparseMatrixIndexCSX,
+ "SparseTensorIndexCSF": SparseTensorIndexSparseTensorIndexCSF,
+}
+
+func (v SparseTensorIndex) String() string {
+ if s, ok := EnumNamesSparseTensorIndex[v]; ok {
+ return s
+ }
+ return "SparseTensorIndex(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go
new file mode 100644
index 000000000..f8eee99fa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go
@@ -0,0 +1,179 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// EXPERIMENTAL: Data structures for sparse tensors
+/// Coordinate (COO) format of sparse tensor index.
+///
+/// COO's index list are represented as a NxM matrix,
+/// where N is the number of non-zero values,
+/// and M is the number of dimensions of a sparse tensor.
+///
+/// indicesBuffer stores the location and size of the data of this indices
+/// matrix. The value type and the stride of the indices matrix is
+/// specified in indicesType and indicesStrides fields.
+///
+/// For example, let X be a 2x3x4x5 tensor, and it has the following
+/// 6 non-zero values:
+/// ```text
+/// X[0, 1, 2, 0] := 1
+/// X[1, 1, 2, 3] := 2
+/// X[0, 2, 1, 0] := 3
+/// X[0, 1, 3, 0] := 4
+/// X[0, 1, 2, 1] := 5
+/// X[1, 2, 0, 4] := 6
+/// ```
+/// In COO format, the index matrix of X is the following 4x6 matrix:
+/// ```text
+/// [[0, 0, 0, 0, 1, 1],
+/// [1, 1, 1, 2, 1, 2],
+/// [2, 2, 3, 1, 2, 0],
+/// [0, 1, 0, 0, 3, 4]]
+/// ```
+/// When isCanonical is true, the indices is sorted in lexicographical order
+/// (row-major order), and it does not have duplicated entries. Otherwise,
+/// the indices may not be sorted, or may have duplicated entries.
+type SparseTensorIndexCOO struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSparseTensorIndexCOO(buf []byte, offset flatbuffers.UOffsetT) *SparseTensorIndexCOO {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SparseTensorIndexCOO{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *SparseTensorIndexCOO) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SparseTensorIndexCOO) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// The type of values in indicesBuffer
+func (rcv *SparseTensorIndexCOO) IndicesType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The type of values in indicesBuffer
+/// Non-negative byte offsets to advance one value cell along each dimension
+/// If omitted, default to row-major order (C-like).
+func (rcv *SparseTensorIndexCOO) IndicesStrides(j int) int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8))
+ }
+ return 0
+}
+
+func (rcv *SparseTensorIndexCOO) IndicesStridesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Non-negative byte offsets to advance one value cell along each dimension
+/// If omitted, default to row-major order (C-like).
+func (rcv *SparseTensorIndexCOO) MutateIndicesStrides(j int, n int64) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n)
+ }
+ return false
+}
+
+/// The location and size of the indices matrix's data
+func (rcv *SparseTensorIndexCOO) IndicesBuffer(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The location and size of the indices matrix's data
+/// This flag is true if and only if the indices matrix is sorted in
+/// row-major order, and does not have duplicated entries.
+/// This sort order is the same as of Tensorflow's SparseTensor,
+/// but it is inverse order of SciPy's canonical coo_matrix
+/// (SciPy employs column-major order for its coo_matrix).
+func (rcv *SparseTensorIndexCOO) IsCanonical() bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.GetBool(o + rcv._tab.Pos)
+ }
+ return false
+}
+
+/// This flag is true if and only if the indices matrix is sorted in
+/// row-major order, and does not have duplicated entries.
+/// This sort order is the same as of Tensorflow's SparseTensor,
+/// but it is inverse order of SciPy's canonical coo_matrix
+/// (SciPy employs column-major order for its coo_matrix).
+func (rcv *SparseTensorIndexCOO) MutateIsCanonical(n bool) bool {
+ return rcv._tab.MutateBoolSlot(10, n)
+}
+
+func SparseTensorIndexCOOStart(builder *flatbuffers.Builder) {
+ builder.StartObject(4)
+}
+func SparseTensorIndexCOOAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indicesType), 0)
+}
+func SparseTensorIndexCOOAddIndicesStrides(builder *flatbuffers.Builder, indicesStrides flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indicesStrides), 0)
+}
+func SparseTensorIndexCOOStartIndicesStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
+func SparseTensorIndexCOOAddIndicesBuffer(builder *flatbuffers.Builder, indicesBuffer flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(2, flatbuffers.UOffsetT(indicesBuffer), 0)
+}
+func SparseTensorIndexCOOAddIsCanonical(builder *flatbuffers.Builder, isCanonical bool) {
+ builder.PrependBoolSlot(3, isCanonical, false)
+}
+func SparseTensorIndexCOOEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go
new file mode 100644
index 000000000..a824c84eb
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go
@@ -0,0 +1,291 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Compressed Sparse Fiber (CSF) sparse tensor index.
+type SparseTensorIndexCSF struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsSparseTensorIndexCSF(buf []byte, offset flatbuffers.UOffsetT) *SparseTensorIndexCSF {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &SparseTensorIndexCSF{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *SparseTensorIndexCSF) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *SparseTensorIndexCSF) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// CSF is a generalization of compressed sparse row (CSR) index.
+/// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf)
+///
+/// CSF index recursively compresses each dimension of a tensor into a set
+/// of prefix trees. Each path from a root to leaf forms one tensor
+/// non-zero index. CSF is implemented with two arrays of buffers and one
+/// arrays of integers.
+///
+/// For example, let X be a 2x3x4x5 tensor and let it have the following
+/// 8 non-zero values:
+/// ```text
+/// X[0, 0, 0, 1] := 1
+/// X[0, 0, 0, 2] := 2
+/// X[0, 1, 0, 0] := 3
+/// X[0, 1, 0, 2] := 4
+/// X[0, 1, 1, 0] := 5
+/// X[1, 1, 1, 0] := 6
+/// X[1, 1, 1, 1] := 7
+/// X[1, 1, 1, 2] := 8
+/// ```
+/// As a prefix tree this would be represented as:
+/// ```text
+/// 0 1
+/// / \ |
+/// 0 1 1
+/// / / \ |
+/// 0 0 1 1
+/// /| /| | /| |
+/// 1 2 0 2 0 0 1 2
+/// ```
+/// The type of values in indptrBuffers
+func (rcv *SparseTensorIndexCSF) IndptrType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// CSF is a generalization of compressed sparse row (CSR) index.
+/// See [smith2017knl](http://shaden.io/pub-files/smith2017knl.pdf)
+///
+/// CSF index recursively compresses each dimension of a tensor into a set
+/// of prefix trees. Each path from a root to leaf forms one tensor
+/// non-zero index. CSF is implemented with two arrays of buffers and one
+/// arrays of integers.
+///
+/// For example, let X be a 2x3x4x5 tensor and let it have the following
+/// 8 non-zero values:
+/// ```text
+/// X[0, 0, 0, 1] := 1
+/// X[0, 0, 0, 2] := 2
+/// X[0, 1, 0, 0] := 3
+/// X[0, 1, 0, 2] := 4
+/// X[0, 1, 1, 0] := 5
+/// X[1, 1, 1, 0] := 6
+/// X[1, 1, 1, 1] := 7
+/// X[1, 1, 1, 2] := 8
+/// ```
+/// As a prefix tree this would be represented as:
+/// ```text
+/// 0 1
+/// / \ |
+/// 0 1 1
+/// / / \ |
+/// 0 0 1 1
+/// /| /| | /| |
+/// 1 2 0 2 0 0 1 2
+/// ```
+/// The type of values in indptrBuffers
+/// indptrBuffers stores the sparsity structure.
+/// Each two consecutive dimensions in a tensor correspond to a buffer in
+/// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]`
+/// and `indptrBuffers[dim][i + 1]` signify a range of nodes in
+/// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node.
+///
+/// For example, the indptrBuffers for the above X is:
+/// ```text
+/// indptrBuffer(X) = [
+/// [0, 2, 3],
+/// [0, 1, 3, 4],
+/// [0, 2, 4, 5, 8]
+/// ].
+/// ```
+func (rcv *SparseTensorIndexCSF) IndptrBuffers(obj *Buffer, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 16
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *SparseTensorIndexCSF) IndptrBuffersLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// indptrBuffers stores the sparsity structure.
+/// Each two consecutive dimensions in a tensor correspond to a buffer in
+/// indptrBuffers. A pair of consecutive values at `indptrBuffers[dim][i]`
+/// and `indptrBuffers[dim][i + 1]` signify a range of nodes in
+/// `indicesBuffers[dim + 1]` who are children of `indicesBuffers[dim][i]` node.
+///
+/// For example, the indptrBuffers for the above X is:
+/// ```text
+/// indptrBuffer(X) = [
+/// [0, 2, 3],
+/// [0, 1, 3, 4],
+/// [0, 2, 4, 5, 8]
+/// ].
+/// ```
+/// The type of values in indicesBuffers
+func (rcv *SparseTensorIndexCSF) IndicesType(obj *Int) *Int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Indirect(o + rcv._tab.Pos)
+ if obj == nil {
+ obj = new(Int)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The type of values in indicesBuffers
+/// indicesBuffers stores values of nodes.
+/// Each tensor dimension corresponds to a buffer in indicesBuffers.
+/// For example, the indicesBuffers for the above X is:
+/// ```text
+/// indicesBuffer(X) = [
+/// [0, 1],
+/// [0, 1, 1],
+/// [0, 0, 1, 1],
+/// [1, 2, 0, 2, 0, 0, 1, 2]
+/// ].
+/// ```
+func (rcv *SparseTensorIndexCSF) IndicesBuffers(obj *Buffer, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 16
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *SparseTensorIndexCSF) IndicesBuffersLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// indicesBuffers stores values of nodes.
+/// Each tensor dimension corresponds to a buffer in indicesBuffers.
+/// For example, the indicesBuffers for the above X is:
+/// ```text
+/// indicesBuffer(X) = [
+/// [0, 1],
+/// [0, 1, 1],
+/// [0, 0, 1, 1],
+/// [1, 2, 0, 2, 0, 0, 1, 2]
+/// ].
+/// ```
+/// axisOrder stores the sequence in which dimensions were traversed to
+/// produce the prefix tree.
+/// For example, the axisOrder for the above X is:
+/// ```text
+/// axisOrder(X) = [0, 1, 2, 3].
+/// ```
+func (rcv *SparseTensorIndexCSF) AxisOrder(j int) int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4))
+ }
+ return 0
+}
+
+func (rcv *SparseTensorIndexCSF) AxisOrderLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// axisOrder stores the sequence in which dimensions were traversed to
+/// produce the prefix tree.
+/// For example, the axisOrder for the above X is:
+/// ```text
+/// axisOrder(X) = [0, 1, 2, 3].
+/// ```
+func (rcv *SparseTensorIndexCSF) MutateAxisOrder(j int, n int32) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n)
+ }
+ return false
+}
+
+func SparseTensorIndexCSFStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func SparseTensorIndexCSFAddIndptrType(builder *flatbuffers.Builder, indptrType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(0, flatbuffers.UOffsetT(indptrType), 0)
+}
+func SparseTensorIndexCSFAddIndptrBuffers(builder *flatbuffers.Builder, indptrBuffers flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(indptrBuffers), 0)
+}
+func SparseTensorIndexCSFStartIndptrBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(16, numElems, 8)
+}
+func SparseTensorIndexCSFAddIndicesType(builder *flatbuffers.Builder, indicesType flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(indicesType), 0)
+}
+func SparseTensorIndexCSFAddIndicesBuffers(builder *flatbuffers.Builder, indicesBuffers flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(indicesBuffers), 0)
+}
+func SparseTensorIndexCSFStartIndicesBuffersVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(16, numElems, 8)
+}
+func SparseTensorIndexCSFAddAxisOrder(builder *flatbuffers.Builder, axisOrder flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(axisOrder), 0)
+}
+func SparseTensorIndexCSFStartAxisOrderVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func SparseTensorIndexCSFEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go
new file mode 100644
index 000000000..427e70603
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// A Struct_ in the flatbuffer metadata is the same as an Arrow Struct
+/// (according to the physical memory layout). We used Struct_ here as
+/// Struct is a reserved word in Flatbuffers
+type Struct_ struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsStruct_(buf []byte, offset flatbuffers.UOffsetT) *Struct_ {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Struct_{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Struct_) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Struct_) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func Struct_Start(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func Struct_End(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go
new file mode 100644
index 000000000..39d70e351
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go
@@ -0,0 +1,163 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+type Tensor struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsTensor(buf []byte, offset flatbuffers.UOffsetT) *Tensor {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Tensor{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Tensor) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Tensor) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Tensor) TypeType() Type {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return Type(rcv._tab.GetByte(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Tensor) MutateTypeType(n Type) bool {
+ return rcv._tab.MutateByteSlot(4, byte(n))
+}
+
+/// The type of data contained in a value cell. Currently only fixed-width
+/// value types are supported, no strings or nested types
+func (rcv *Tensor) Type(obj *flatbuffers.Table) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ rcv._tab.Union(obj, o)
+ return true
+ }
+ return false
+}
+
+/// The type of data contained in a value cell. Currently only fixed-width
+/// value types are supported, no strings or nested types
+/// The dimensions of the tensor, optionally named
+func (rcv *Tensor) Shape(obj *TensorDim, j int) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ x := rcv._tab.Vector(o)
+ x += flatbuffers.UOffsetT(j) * 4
+ x = rcv._tab.Indirect(x)
+ obj.Init(rcv._tab.Bytes, x)
+ return true
+ }
+ return false
+}
+
+func (rcv *Tensor) ShapeLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(8))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// The dimensions of the tensor, optionally named
+/// Non-negative byte offsets to advance one value cell along each dimension
+/// If omitted, default to row-major order (C-like).
+func (rcv *Tensor) Strides(j int) int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8))
+ }
+ return 0
+}
+
+func (rcv *Tensor) StridesLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+/// Non-negative byte offsets to advance one value cell along each dimension
+/// If omitted, default to row-major order (C-like).
+func (rcv *Tensor) MutateStrides(j int, n int64) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(10))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n)
+ }
+ return false
+}
+
+/// The location and size of the tensor's data
+func (rcv *Tensor) Data(obj *Buffer) *Buffer {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(12))
+ if o != 0 {
+ x := o + rcv._tab.Pos
+ if obj == nil {
+ obj = new(Buffer)
+ }
+ obj.Init(rcv._tab.Bytes, x)
+ return obj
+ }
+ return nil
+}
+
+/// The location and size of the tensor's data
+func TensorStart(builder *flatbuffers.Builder) {
+ builder.StartObject(5)
+}
+func TensorAddTypeType(builder *flatbuffers.Builder, typeType Type) {
+ builder.PrependByteSlot(0, byte(typeType), 0)
+}
+func TensorAddType(builder *flatbuffers.Builder, type_ flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(type_), 0)
+}
+func TensorAddShape(builder *flatbuffers.Builder, shape flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(2, flatbuffers.UOffsetT(shape), 0)
+}
+func TensorStartShapeVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func TensorAddStrides(builder *flatbuffers.Builder, strides flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(strides), 0)
+}
+func TensorStartStridesVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(8, numElems, 8)
+}
+func TensorAddData(builder *flatbuffers.Builder, data flatbuffers.UOffsetT) {
+ builder.PrependStructSlot(4, flatbuffers.UOffsetT(data), 0)
+}
+func TensorEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go
new file mode 100644
index 000000000..14b821208
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// ----------------------------------------------------------------------
+/// Data structures for dense tensors
+/// Shape data for a single axis in a tensor
+type TensorDim struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsTensorDim(buf []byte, offset flatbuffers.UOffsetT) *TensorDim {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &TensorDim{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *TensorDim) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *TensorDim) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+/// Length of dimension
+func (rcv *TensorDim) Size() int64 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return rcv._tab.GetInt64(o + rcv._tab.Pos)
+ }
+ return 0
+}
+
+/// Length of dimension
+func (rcv *TensorDim) MutateSize(n int64) bool {
+ return rcv._tab.MutateInt64Slot(4, n)
+}
+
+/// Name of the dimension, optional
+func (rcv *TensorDim) Name() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+/// Name of the dimension, optional
+func TensorDimStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func TensorDimAddSize(builder *flatbuffers.Builder, size int64) {
+ builder.PrependInt64Slot(0, size, 0)
+}
+func TensorDimAddName(builder *flatbuffers.Builder, name flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(name), 0)
+}
+func TensorDimEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go
new file mode 100644
index 000000000..2fb6e4c11
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go
@@ -0,0 +1,94 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Time is either a 32-bit or 64-bit signed integer type representing an
+/// elapsed time since midnight, stored in either of four units: seconds,
+/// milliseconds, microseconds or nanoseconds.
+///
+/// The integer `bitWidth` depends on the `unit` and must be one of the following:
+/// * SECOND and MILLISECOND: 32 bits
+/// * MICROSECOND and NANOSECOND: 64 bits
+///
+/// The allowed values are between 0 (inclusive) and 86400 (=24*60*60) seconds
+/// (exclusive), adjusted for the time unit (for example, up to 86400000
+/// exclusive for the MILLISECOND unit).
+/// This definition doesn't allow for leap seconds. Time values from
+/// measurements with leap seconds will need to be corrected when ingesting
+/// into Arrow (for example by replacing the value 86400 with 86399).
+type Time struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsTime(buf []byte, offset flatbuffers.UOffsetT) *Time {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Time{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Time) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Time) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Time) Unit() TimeUnit {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 1
+}
+
+func (rcv *Time) MutateUnit(n TimeUnit) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func (rcv *Time) BitWidth() int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.GetInt32(o + rcv._tab.Pos)
+ }
+ return 32
+}
+
+func (rcv *Time) MutateBitWidth(n int32) bool {
+ return rcv._tab.MutateInt32Slot(6, n)
+}
+
+func TimeStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func TimeAddUnit(builder *flatbuffers.Builder, unit TimeUnit) {
+ builder.PrependInt16Slot(0, int16(unit), 1)
+}
+func TimeAddBitWidth(builder *flatbuffers.Builder, bitWidth int32) {
+ builder.PrependInt32Slot(1, bitWidth, 32)
+}
+func TimeEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go
new file mode 100644
index 000000000..df14ece4f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type TimeUnit int16
+
+const (
+ TimeUnitSECOND TimeUnit = 0
+ TimeUnitMILLISECOND TimeUnit = 1
+ TimeUnitMICROSECOND TimeUnit = 2
+ TimeUnitNANOSECOND TimeUnit = 3
+)
+
+var EnumNamesTimeUnit = map[TimeUnit]string{
+ TimeUnitSECOND: "SECOND",
+ TimeUnitMILLISECOND: "MILLISECOND",
+ TimeUnitMICROSECOND: "MICROSECOND",
+ TimeUnitNANOSECOND: "NANOSECOND",
+}
+
+var EnumValuesTimeUnit = map[string]TimeUnit{
+ "SECOND": TimeUnitSECOND,
+ "MILLISECOND": TimeUnitMILLISECOND,
+ "MICROSECOND": TimeUnitMICROSECOND,
+ "NANOSECOND": TimeUnitNANOSECOND,
+}
+
+func (v TimeUnit) String() string {
+ if s, ok := EnumNamesTimeUnit[v]; ok {
+ return s
+ }
+ return "TimeUnit(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go
new file mode 100644
index 000000000..f53211455
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go
@@ -0,0 +1,201 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Timestamp is a 64-bit signed integer representing an elapsed time since a
+/// fixed epoch, stored in either of four units: seconds, milliseconds,
+/// microseconds or nanoseconds, and is optionally annotated with a timezone.
+///
+/// Timestamp values do not include any leap seconds (in other words, all
+/// days are considered 86400 seconds long).
+///
+/// Timestamps with a non-empty timezone
+/// ------------------------------------
+///
+/// If a Timestamp column has a non-empty timezone value, its epoch is
+/// 1970-01-01 00:00:00 (January 1st 1970, midnight) in the *UTC* timezone
+/// (the Unix epoch), regardless of the Timestamp's own timezone.
+///
+/// Therefore, timestamp values with a non-empty timezone correspond to
+/// physical points in time together with some additional information about
+/// how the data was obtained and/or how to display it (the timezone).
+///
+/// For example, the timestamp value 0 with the timezone string "Europe/Paris"
+/// corresponds to "January 1st 1970, 00h00" in the UTC timezone, but the
+/// application may prefer to display it as "January 1st 1970, 01h00" in
+/// the Europe/Paris timezone (which is the same physical point in time).
+///
+/// One consequence is that timestamp values with a non-empty timezone
+/// can be compared and ordered directly, since they all share the same
+/// well-known point of reference (the Unix epoch).
+///
+/// Timestamps with an unset / empty timezone
+/// -----------------------------------------
+///
+/// If a Timestamp column has no timezone value, its epoch is
+/// 1970-01-01 00:00:00 (January 1st 1970, midnight) in an *unknown* timezone.
+///
+/// Therefore, timestamp values without a timezone cannot be meaningfully
+/// interpreted as physical points in time, but only as calendar / clock
+/// indications ("wall clock time") in an unspecified timezone.
+///
+/// For example, the timestamp value 0 with an empty timezone string
+/// corresponds to "January 1st 1970, 00h00" in an unknown timezone: there
+/// is not enough information to interpret it as a well-defined physical
+/// point in time.
+///
+/// One consequence is that timestamp values without a timezone cannot
+/// be reliably compared or ordered, since they may have different points of
+/// reference. In particular, it is *not* possible to interpret an unset
+/// or empty timezone as the same as "UTC".
+///
+/// Conversion between timezones
+/// ----------------------------
+///
+/// If a Timestamp column has a non-empty timezone, changing the timezone
+/// to a different non-empty value is a metadata-only operation:
+/// the timestamp values need not change as their point of reference remains
+/// the same (the Unix epoch).
+///
+/// However, if a Timestamp column has no timezone value, changing it to a
+/// non-empty value requires to think about the desired semantics.
+/// One possibility is to assume that the original timestamp values are
+/// relative to the epoch of the timezone being set; timestamp values should
+/// then adjusted to the Unix epoch (for example, changing the timezone from
+/// empty to "Europe/Paris" would require converting the timestamp values
+/// from "Europe/Paris" to "UTC", which seems counter-intuitive but is
+/// nevertheless correct).
+///
+/// Guidelines for encoding data from external libraries
+/// ----------------------------------------------------
+///
+/// Date & time libraries often have multiple different data types for temporal
+/// data. In order to ease interoperability between different implementations the
+/// Arrow project has some recommendations for encoding these types into a Timestamp
+/// column.
+///
+/// An "instant" represents a physical point in time that has no relevant timezone
+/// (for example, astronomical data). To encode an instant, use a Timestamp with
+/// the timezone string set to "UTC", and make sure the Timestamp values
+/// are relative to the UTC epoch (January 1st 1970, midnight).
+///
+/// A "zoned date-time" represents a physical point in time annotated with an
+/// informative timezone (for example, the timezone in which the data was
+/// recorded). To encode a zoned date-time, use a Timestamp with the timezone
+/// string set to the name of the timezone, and make sure the Timestamp values
+/// are relative to the UTC epoch (January 1st 1970, midnight).
+///
+/// (There is some ambiguity between an instant and a zoned date-time with the
+/// UTC timezone. Both of these are stored the same in Arrow. Typically,
+/// this distinction does not matter. If it does, then an application should
+/// use custom metadata or an extension type to distinguish between the two cases.)
+///
+/// An "offset date-time" represents a physical point in time combined with an
+/// explicit offset from UTC. To encode an offset date-time, use a Timestamp
+/// with the timezone string set to the numeric timezone offset string
+/// (e.g. "+03:00"), and make sure the Timestamp values are relative to
+/// the UTC epoch (January 1st 1970, midnight).
+///
+/// A "naive date-time" (also called "local date-time" in some libraries)
+/// represents a wall clock time combined with a calendar date, but with
+/// no indication of how to map this information to a physical point in time.
+/// Naive date-times must be handled with care because of this missing
+/// information, and also because daylight saving time (DST) may make
+/// some values ambiguous or non-existent. A naive date-time may be
+/// stored as a struct with Date and Time fields. However, it may also be
+/// encoded into a Timestamp column with an empty timezone. The timestamp
+/// values should be computed "as if" the timezone of the date-time values
+/// was UTC; for example, the naive date-time "January 1st 1970, 00h00" would
+/// be encoded as timestamp value 0.
+type Timestamp struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsTimestamp(buf []byte, offset flatbuffers.UOffsetT) *Timestamp {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Timestamp{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Timestamp) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Timestamp) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Timestamp) Unit() TimeUnit {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return TimeUnit(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Timestamp) MutateUnit(n TimeUnit) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+/// The timezone is an optional string indicating the name of a timezone,
+/// one of:
+///
+/// * As used in the Olson timezone database (the "tz database" or
+/// "tzdata"), such as "America/New_York".
+/// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX",
+/// such as "+07:30".
+///
+/// Whether a timezone string is present indicates different semantics about
+/// the data (see above).
+func (rcv *Timestamp) Timezone() []byte {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.ByteVector(o + rcv._tab.Pos)
+ }
+ return nil
+}
+
+/// The timezone is an optional string indicating the name of a timezone,
+/// one of:
+///
+/// * As used in the Olson timezone database (the "tz database" or
+/// "tzdata"), such as "America/New_York".
+/// * An absolute timezone offset of the form "+XX:XX" or "-XX:XX",
+/// such as "+07:30".
+///
+/// Whether a timezone string is present indicates different semantics about
+/// the data (see above).
+func TimestampStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func TimestampAddUnit(builder *flatbuffers.Builder, unit TimeUnit) {
+ builder.PrependInt16Slot(0, int16(unit), 0)
+}
+func TimestampAddTimezone(builder *flatbuffers.Builder, timezone flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(timezone), 0)
+}
+func TimestampEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go
new file mode 100644
index 000000000..ab2bce9c6
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go
@@ -0,0 +1,123 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+/// ----------------------------------------------------------------------
+/// Top-level Type value, enabling extensible type-specific metadata. We can
+/// add new logical types to Type without breaking backwards compatibility
+type Type byte
+
+const (
+ TypeNONE Type = 0
+ TypeNull Type = 1
+ TypeInt Type = 2
+ TypeFloatingPoint Type = 3
+ TypeBinary Type = 4
+ TypeUtf8 Type = 5
+ TypeBool Type = 6
+ TypeDecimal Type = 7
+ TypeDate Type = 8
+ TypeTime Type = 9
+ TypeTimestamp Type = 10
+ TypeInterval Type = 11
+ TypeList Type = 12
+ TypeStruct_ Type = 13
+ TypeUnion Type = 14
+ TypeFixedSizeBinary Type = 15
+ TypeFixedSizeList Type = 16
+ TypeMap Type = 17
+ TypeDuration Type = 18
+ TypeLargeBinary Type = 19
+ TypeLargeUtf8 Type = 20
+ TypeLargeList Type = 21
+ TypeRunEndEncoded Type = 22
+ TypeBinaryView Type = 23
+ TypeUtf8View Type = 24
+ TypeListView Type = 25
+ TypeLargeListView Type = 26
+)
+
+var EnumNamesType = map[Type]string{
+ TypeNONE: "NONE",
+ TypeNull: "Null",
+ TypeInt: "Int",
+ TypeFloatingPoint: "FloatingPoint",
+ TypeBinary: "Binary",
+ TypeUtf8: "Utf8",
+ TypeBool: "Bool",
+ TypeDecimal: "Decimal",
+ TypeDate: "Date",
+ TypeTime: "Time",
+ TypeTimestamp: "Timestamp",
+ TypeInterval: "Interval",
+ TypeList: "List",
+ TypeStruct_: "Struct_",
+ TypeUnion: "Union",
+ TypeFixedSizeBinary: "FixedSizeBinary",
+ TypeFixedSizeList: "FixedSizeList",
+ TypeMap: "Map",
+ TypeDuration: "Duration",
+ TypeLargeBinary: "LargeBinary",
+ TypeLargeUtf8: "LargeUtf8",
+ TypeLargeList: "LargeList",
+ TypeRunEndEncoded: "RunEndEncoded",
+ TypeBinaryView: "BinaryView",
+ TypeUtf8View: "Utf8View",
+ TypeListView: "ListView",
+ TypeLargeListView: "LargeListView",
+}
+
+var EnumValuesType = map[string]Type{
+ "NONE": TypeNONE,
+ "Null": TypeNull,
+ "Int": TypeInt,
+ "FloatingPoint": TypeFloatingPoint,
+ "Binary": TypeBinary,
+ "Utf8": TypeUtf8,
+ "Bool": TypeBool,
+ "Decimal": TypeDecimal,
+ "Date": TypeDate,
+ "Time": TypeTime,
+ "Timestamp": TypeTimestamp,
+ "Interval": TypeInterval,
+ "List": TypeList,
+ "Struct_": TypeStruct_,
+ "Union": TypeUnion,
+ "FixedSizeBinary": TypeFixedSizeBinary,
+ "FixedSizeList": TypeFixedSizeList,
+ "Map": TypeMap,
+ "Duration": TypeDuration,
+ "LargeBinary": TypeLargeBinary,
+ "LargeUtf8": TypeLargeUtf8,
+ "LargeList": TypeLargeList,
+ "RunEndEncoded": TypeRunEndEncoded,
+ "BinaryView": TypeBinaryView,
+ "Utf8View": TypeUtf8View,
+ "ListView": TypeListView,
+ "LargeListView": TypeLargeListView,
+}
+
+func (v Type) String() string {
+ if s, ok := EnumNamesType[v]; ok {
+ return s
+ }
+ return "Type(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go
new file mode 100644
index 000000000..e34121d47
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go
@@ -0,0 +1,101 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// A union is a complex type with children in Field
+/// By default ids in the type vector refer to the offsets in the children
+/// optionally typeIds provides an indirection between the child offset and the type id
+/// for each child `typeIds[offset]` is the id used in the type vector
+type Union struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsUnion(buf []byte, offset flatbuffers.UOffsetT) *Union {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Union{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Union) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Union) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func (rcv *Union) Mode() UnionMode {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(4))
+ if o != 0 {
+ return UnionMode(rcv._tab.GetInt16(o + rcv._tab.Pos))
+ }
+ return 0
+}
+
+func (rcv *Union) MutateMode(n UnionMode) bool {
+ return rcv._tab.MutateInt16Slot(4, int16(n))
+}
+
+func (rcv *Union) TypeIds(j int) int32 {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.GetInt32(a + flatbuffers.UOffsetT(j*4))
+ }
+ return 0
+}
+
+func (rcv *Union) TypeIdsLength() int {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ return rcv._tab.VectorLen(o)
+ }
+ return 0
+}
+
+func (rcv *Union) MutateTypeIds(j int, n int32) bool {
+ o := flatbuffers.UOffsetT(rcv._tab.Offset(6))
+ if o != 0 {
+ a := rcv._tab.Vector(o)
+ return rcv._tab.MutateInt32(a+flatbuffers.UOffsetT(j*4), n)
+ }
+ return false
+}
+
+func UnionStart(builder *flatbuffers.Builder) {
+ builder.StartObject(2)
+}
+func UnionAddMode(builder *flatbuffers.Builder, mode UnionMode) {
+ builder.PrependInt16Slot(0, int16(mode), 0)
+}
+func UnionAddTypeIds(builder *flatbuffers.Builder, typeIds flatbuffers.UOffsetT) {
+ builder.PrependUOffsetTSlot(1, flatbuffers.UOffsetT(typeIds), 0)
+}
+func UnionStartTypeIdsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT {
+ return builder.StartVector(4, numElems, 4)
+}
+func UnionEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go
new file mode 100644
index 000000000..357c1f3cb
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go
@@ -0,0 +1,45 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import "strconv"
+
+type UnionMode int16
+
+const (
+ UnionModeSparse UnionMode = 0
+ UnionModeDense UnionMode = 1
+)
+
+var EnumNamesUnionMode = map[UnionMode]string{
+ UnionModeSparse: "Sparse",
+ UnionModeDense: "Dense",
+}
+
+var EnumValuesUnionMode = map[string]UnionMode{
+ "Sparse": UnionModeSparse,
+ "Dense": UnionModeDense,
+}
+
+func (v UnionMode) String() string {
+ if s, ok := EnumNamesUnionMode[v]; ok {
+ return s
+ }
+ return "UnionMode(" + strconv.FormatInt(int64(v), 10) + ")"
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go
new file mode 100644
index 000000000..4ff365a37
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Unicode with UTF-8 encoding
+type Utf8 struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsUtf8(buf []byte, offset flatbuffers.UOffsetT) *Utf8 {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Utf8{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Utf8) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Utf8) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func Utf8Start(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func Utf8End(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go
new file mode 100644
index 000000000..9cf821490
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go
@@ -0,0 +1,57 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by the FlatBuffers compiler. DO NOT EDIT.
+
+package flatbuf
+
+import (
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+/// Logically the same as Utf8, but the internal representation uses a view
+/// struct that contains the string length and either the string's entire data
+/// inline (for small strings) or an inlined prefix, an index of another buffer,
+/// and an offset pointing to a slice in that buffer (for non-small strings).
+///
+/// Since it uses a variable number of data buffers, each Field with this type
+/// must have a corresponding entry in `variadicBufferCounts`.
+type Utf8View struct {
+ _tab flatbuffers.Table
+}
+
+func GetRootAsUtf8View(buf []byte, offset flatbuffers.UOffsetT) *Utf8View {
+ n := flatbuffers.GetUOffsetT(buf[offset:])
+ x := &Utf8View{}
+ x.Init(buf, n+offset)
+ return x
+}
+
+func (rcv *Utf8View) Init(buf []byte, i flatbuffers.UOffsetT) {
+ rcv._tab.Bytes = buf
+ rcv._tab.Pos = i
+}
+
+func (rcv *Utf8View) Table() flatbuffers.Table {
+ return rcv._tab
+}
+
+func Utf8ViewStart(builder *flatbuffers.Builder) {
+ builder.StartObject(0)
+}
+func Utf8ViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT {
+ return builder.EndObject()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go
new file mode 100644
index 000000000..265f030df
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go
@@ -0,0 +1,47 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+)
+
+const CurMetadataVersion = flatbuf.MetadataVersionV5
+
+// DefaultHasValidityBitmap is a convenience function equivalent to
+// calling HasValidityBitmap with CurMetadataVersion.
+func DefaultHasValidityBitmap(id arrow.Type) bool { return HasValidityBitmap(id, CurMetadataVersion) }
+
+// HasValidityBitmap returns whether the given type at the provided version is
+// expected to have a validity bitmap in its representation.
+//
+// Typically this is necessary because of the change between V4 and V5
+// where union types no longer have validity bitmaps.
+func HasValidityBitmap(id arrow.Type, version flatbuf.MetadataVersion) bool {
+ // in <=V4 Null types had no validity bitmap
+ // in >=V5 Null and Union types have no validity bitmap
+ if version < flatbuf.MetadataVersionV5 {
+ return id != arrow.NULL
+ }
+
+ switch id {
+ case arrow.NULL, arrow.DENSE_UNION, arrow.SPARSE_UNION, arrow.RUN_END_ENCODED:
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go
new file mode 100644
index 000000000..73fb91650
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "io"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/klauspost/compress/zstd"
+ "github.com/pierrec/lz4/v4"
+)
+
+type compressor interface {
+ MaxCompressedLen(n int) int
+ Reset(io.Writer)
+ io.WriteCloser
+ Type() flatbuf.CompressionType
+}
+
+type lz4Compressor struct {
+ *lz4.Writer
+}
+
+func (lz4Compressor) MaxCompressedLen(n int) int {
+ return lz4.CompressBlockBound(n)
+}
+
+func (lz4Compressor) Type() flatbuf.CompressionType {
+ return flatbuf.CompressionTypeLZ4_FRAME
+}
+
+type zstdCompressor struct {
+ *zstd.Encoder
+}
+
+// from zstd.h, ZSTD_COMPRESSBOUND
+func (zstdCompressor) MaxCompressedLen(len int) int {
+ debug.Assert(len >= 0, "MaxCompressedLen called with len less than 0")
+ extra := uint((uint(128<<10) - uint(len)) >> 11)
+ if len >= (128 << 10) {
+ extra = 0
+ }
+ return int(uint(len+(len>>8)) + extra)
+}
+
+func (zstdCompressor) Type() flatbuf.CompressionType {
+ return flatbuf.CompressionTypeZSTD
+}
+
+func getCompressor(codec flatbuf.CompressionType) compressor {
+ switch codec {
+ case flatbuf.CompressionTypeLZ4_FRAME:
+ w := lz4.NewWriter(nil)
+ // options here chosen in order to match the C++ implementation
+ w.Apply(lz4.ChecksumOption(false), lz4.BlockSizeOption(lz4.Block64Kb))
+ return &lz4Compressor{w}
+ case flatbuf.CompressionTypeZSTD:
+ enc, err := zstd.NewWriter(nil)
+ if err != nil {
+ panic(err)
+ }
+ return zstdCompressor{enc}
+ }
+ return nil
+}
+
+type decompressor interface {
+ io.Reader
+ Reset(io.Reader)
+ Close()
+}
+
+type zstdDecompressor struct {
+ *zstd.Decoder
+}
+
+func (z *zstdDecompressor) Reset(r io.Reader) {
+ if err := z.Decoder.Reset(r); err != nil {
+ panic(err)
+ }
+}
+
+func (z *zstdDecompressor) Close() {
+ z.Decoder.Close()
+}
+
+type lz4Decompressor struct {
+ *lz4.Reader
+}
+
+func (z *lz4Decompressor) Close() {}
+
+func getDecompressor(codec flatbuf.CompressionType) decompressor {
+ switch codec {
+ case flatbuf.CompressionTypeLZ4_FRAME:
+ return &lz4Decompressor{lz4.NewReader(nil)}
+ case flatbuf.CompressionTypeZSTD:
+ dec, err := zstd.NewReader(nil)
+ if err != nil {
+ panic(err)
+ }
+ return &zstdDecompressor{dec}
+ }
+ return nil
+}
+
+type bufferWriter struct {
+ buf *memory.Buffer
+ pos int
+}
+
+func (bw *bufferWriter) Write(p []byte) (n int, err error) {
+ if bw.pos+len(p) >= bw.buf.Cap() {
+ bw.buf.Reserve(bw.pos + len(p))
+ }
+ n = copy(bw.buf.Buf()[bw.pos:], p)
+ bw.pos += n
+ return
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go
new file mode 100644
index 000000000..d98fec108
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go
@@ -0,0 +1,162 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "errors"
+ "math/bits"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/array"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// swap the endianness of the array's buffers as needed in-place to save
+// the cost of reallocation.
+//
+// assumes that nested data buffers are never re-used, if an *array.Data
+// child is re-used among the children or the dictionary then this might
+// end up double-swapping (putting it back into the original endianness).
+// if it is needed to support re-using the buffers, then this can be
+// re-factored to instead return a NEW array.Data object with newly
+// allocated buffers, rather than doing it in place.
+//
+// For now this is intended to be used by the IPC readers after loading
+// arrays from an IPC message which currently is guaranteed to not re-use
+// buffers between arrays.
+func swapEndianArrayData(data *array.Data) error {
+ if data.Offset() != 0 {
+ return errors.New("unsupported data format: data.offset != 0")
+ }
+ if err := swapType(data.DataType(), data); err != nil {
+ return err
+ }
+ return swapChildren(data.Children())
+}
+
+func swapChildren(children []arrow.ArrayData) (err error) {
+ for i := range children {
+ if err = swapEndianArrayData(children[i].(*array.Data)); err != nil {
+ break
+ }
+ }
+ return
+}
+
+func swapType(dt arrow.DataType, data *array.Data) (err error) {
+ switch dt.ID() {
+ case arrow.BINARY, arrow.STRING:
+ swapOffsets(1, 32, data)
+ return
+ case arrow.LARGE_BINARY, arrow.LARGE_STRING:
+ swapOffsets(1, 64, data)
+ return
+ case arrow.NULL, arrow.BOOL, arrow.INT8, arrow.UINT8,
+ arrow.FIXED_SIZE_BINARY, arrow.FIXED_SIZE_LIST, arrow.STRUCT:
+ return
+ }
+
+ switch dt := dt.(type) {
+ case *arrow.Decimal128Type:
+ rawdata := arrow.Uint64Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ length := data.Buffers()[1].Len() / arrow.Decimal128SizeBytes
+ for i := 0; i < length; i++ {
+ idx := i * 2
+ tmp := bits.ReverseBytes64(rawdata[idx])
+ rawdata[idx] = bits.ReverseBytes64(rawdata[idx+1])
+ rawdata[idx+1] = tmp
+ }
+ case *arrow.Decimal256Type:
+ rawdata := arrow.Uint64Traits.CastFromBytes(data.Buffers()[1].Bytes())
+ length := data.Buffers()[1].Len() / arrow.Decimal256SizeBytes
+ for i := 0; i < length; i++ {
+ idx := i * 4
+ tmp0 := bits.ReverseBytes64(rawdata[idx])
+ tmp1 := bits.ReverseBytes64(rawdata[idx+1])
+ tmp2 := bits.ReverseBytes64(rawdata[idx+2])
+ rawdata[idx] = bits.ReverseBytes64(rawdata[idx+3])
+ rawdata[idx+1] = tmp2
+ rawdata[idx+2] = tmp1
+ rawdata[idx+3] = tmp0
+ }
+ case arrow.UnionType:
+ if dt.Mode() == arrow.DenseMode {
+ swapOffsets(2, 32, data)
+ }
+ case *arrow.ListType:
+ swapOffsets(1, 32, data)
+ case *arrow.LargeListType:
+ swapOffsets(1, 64, data)
+ case *arrow.MapType:
+ swapOffsets(1, 32, data)
+ case *arrow.DayTimeIntervalType:
+ byteSwapBuffer(32, data.Buffers()[1])
+ case *arrow.MonthDayNanoIntervalType:
+ rawdata := arrow.MonthDayNanoIntervalTraits.CastFromBytes(data.Buffers()[1].Bytes())
+ for i, tmp := range rawdata {
+ rawdata[i].Days = int32(bits.ReverseBytes32(uint32(tmp.Days)))
+ rawdata[i].Months = int32(bits.ReverseBytes32(uint32(tmp.Months)))
+ rawdata[i].Nanoseconds = int64(bits.ReverseBytes64(uint64(tmp.Nanoseconds)))
+ }
+ case arrow.ExtensionType:
+ return swapType(dt.StorageType(), data)
+ case *arrow.DictionaryType:
+ // dictionary itself was already swapped in ReadDictionary calls
+ return swapType(dt.IndexType, data)
+ case arrow.FixedWidthDataType:
+ byteSwapBuffer(dt.BitWidth(), data.Buffers()[1])
+ }
+ return
+}
+
+// this can get called on an invalid Array Data object by the IPC reader,
+// so we won't rely on the data.length and will instead rely on the buffer's
+// own size instead.
+func byteSwapBuffer(bw int, buf *memory.Buffer) {
+ if bw == 1 || buf == nil {
+ // if byte width == 1, no need to swap anything
+ return
+ }
+
+ switch bw {
+ case 16:
+ data := arrow.Uint16Traits.CastFromBytes(buf.Bytes())
+ for i := range data {
+ data[i] = bits.ReverseBytes16(data[i])
+ }
+ case 32:
+ data := arrow.Uint32Traits.CastFromBytes(buf.Bytes())
+ for i := range data {
+ data[i] = bits.ReverseBytes32(data[i])
+ }
+ case 64:
+ data := arrow.Uint64Traits.CastFromBytes(buf.Bytes())
+ for i := range data {
+ data[i] = bits.ReverseBytes64(data[i])
+ }
+ }
+}
+
+func swapOffsets(index int, bitWidth int, data *array.Data) {
+ if data.Buffers()[index] == nil || data.Buffers()[index].Len() == 0 {
+ return
+ }
+
+ // other than unions, offset has one more element than the data.length
+ // don't yet implement large types, so hardcode 32bit offsets for now
+ byteSwapBuffer(bitWidth, data.Buffers()[index])
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go
new file mode 100644
index 000000000..10cb2cae7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go
@@ -0,0 +1,751 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/array"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/endian"
+ "github.com/apache/arrow/go/v14/arrow/internal"
+ "github.com/apache/arrow/go/v14/arrow/internal/dictutils"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// FileReader is an Arrow file reader.
+type FileReader struct {
+ r ReadAtSeeker
+
+ footer struct {
+ offset int64
+ buffer *memory.Buffer
+ data *flatbuf.Footer
+ }
+
+ // fields dictTypeMap
+ memo dictutils.Memo
+
+ schema *arrow.Schema
+ record arrow.Record
+
+ irec int // current record index. used for the arrio.Reader interface
+ err error // last error
+
+ mem memory.Allocator
+ swapEndianness bool
+}
+
+// NewFileReader opens an Arrow file using the provided reader r.
+func NewFileReader(r ReadAtSeeker, opts ...Option) (*FileReader, error) {
+ var (
+ cfg = newConfig(opts...)
+ err error
+
+ f = FileReader{
+ r: r,
+ memo: dictutils.NewMemo(),
+ mem: cfg.alloc,
+ }
+ )
+
+ if cfg.footer.offset <= 0 {
+ cfg.footer.offset, err = f.r.Seek(0, io.SeekEnd)
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could retrieve footer offset: %w", err)
+ }
+ }
+ f.footer.offset = cfg.footer.offset
+
+ err = f.readFooter()
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not decode footer: %w", err)
+ }
+
+ err = f.readSchema(cfg.ensureNativeEndian)
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not decode schema: %w", err)
+ }
+
+ if cfg.schema != nil && !cfg.schema.Equal(f.schema) {
+ return nil, fmt.Errorf("arrow/ipc: inconsistent schema for reading (got: %v, want: %v)", f.schema, cfg.schema)
+ }
+
+ return &f, err
+}
+
+func (f *FileReader) readFooter() error {
+ var err error
+
+ if f.footer.offset <= int64(len(Magic)*2+4) {
+ return fmt.Errorf("arrow/ipc: file too small (size=%d)", f.footer.offset)
+ }
+
+ eof := int64(len(Magic) + 4)
+ buf := make([]byte, eof)
+ n, err := f.r.ReadAt(buf, f.footer.offset-eof)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not read footer: %w", err)
+ }
+ if n != len(buf) {
+ return fmt.Errorf("arrow/ipc: could not read %d bytes from end of file", len(buf))
+ }
+
+ if !bytes.Equal(buf[4:], Magic) {
+ return errNotArrowFile
+ }
+
+ size := int64(binary.LittleEndian.Uint32(buf[:4]))
+ if size <= 0 || size+int64(len(Magic)*2+4) > f.footer.offset {
+ return errInconsistentFileMetadata
+ }
+
+ buf = make([]byte, size)
+ n, err = f.r.ReadAt(buf, f.footer.offset-size-eof)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not read footer data: %w", err)
+ }
+ if n != len(buf) {
+ return fmt.Errorf("arrow/ipc: could not read %d bytes from footer data", len(buf))
+ }
+
+ f.footer.buffer = memory.NewBufferBytes(buf)
+ f.footer.data = flatbuf.GetRootAsFooter(buf, 0)
+ return err
+}
+
+func (f *FileReader) readSchema(ensureNativeEndian bool) error {
+ var (
+ err error
+ kind dictutils.Kind
+ )
+
+ schema := f.footer.data.Schema(nil)
+ if schema == nil {
+ return fmt.Errorf("arrow/ipc: could not load schema from flatbuffer data")
+ }
+ f.schema, err = schemaFromFB(schema, &f.memo)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not read schema: %w", err)
+ }
+
+ if ensureNativeEndian && !f.schema.IsNativeEndian() {
+ f.swapEndianness = true
+ f.schema = f.schema.WithEndianness(endian.NativeEndian)
+ }
+
+ for i := 0; i < f.NumDictionaries(); i++ {
+ blk, err := f.dict(i)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not read dictionary[%d]: %w", i, err)
+ }
+ switch {
+ case !bitutil.IsMultipleOf8(blk.Offset):
+ return fmt.Errorf("arrow/ipc: invalid file offset=%d for dictionary %d", blk.Offset, i)
+ case !bitutil.IsMultipleOf8(int64(blk.Meta)):
+ return fmt.Errorf("arrow/ipc: invalid file metadata=%d position for dictionary %d", blk.Meta, i)
+ case !bitutil.IsMultipleOf8(blk.Body):
+ return fmt.Errorf("arrow/ipc: invalid file body=%d position for dictionary %d", blk.Body, i)
+ }
+
+ msg, err := blk.NewMessage()
+ if err != nil {
+ return err
+ }
+
+ kind, err = readDictionary(&f.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), f.swapEndianness, f.mem)
+ if err != nil {
+ return err
+ }
+ if kind == dictutils.KindReplacement {
+ return errors.New("arrow/ipc: unsupported dictionary replacement in IPC file")
+ }
+ }
+
+ return err
+}
+
+func (f *FileReader) block(i int) (fileBlock, error) {
+ var blk flatbuf.Block
+ if !f.footer.data.RecordBatches(&blk, i) {
+ return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract file block %d", i)
+ }
+
+ return fileBlock{
+ Offset: blk.Offset(),
+ Meta: blk.MetaDataLength(),
+ Body: blk.BodyLength(),
+ r: f.r,
+ mem: f.mem,
+ }, nil
+}
+
+func (f *FileReader) dict(i int) (fileBlock, error) {
+ var blk flatbuf.Block
+ if !f.footer.data.Dictionaries(&blk, i) {
+ return fileBlock{}, fmt.Errorf("arrow/ipc: could not extract dictionary block %d", i)
+ }
+
+ return fileBlock{
+ Offset: blk.Offset(),
+ Meta: blk.MetaDataLength(),
+ Body: blk.BodyLength(),
+ r: f.r,
+ mem: f.mem,
+ }, nil
+}
+
+func (f *FileReader) Schema() *arrow.Schema {
+ return f.schema
+}
+
+func (f *FileReader) NumDictionaries() int {
+ if f.footer.data == nil {
+ return 0
+ }
+ return f.footer.data.DictionariesLength()
+}
+
+func (f *FileReader) NumRecords() int {
+ return f.footer.data.RecordBatchesLength()
+}
+
+func (f *FileReader) Version() MetadataVersion {
+ return MetadataVersion(f.footer.data.Version())
+}
+
+// Close cleans up resources used by the File.
+// Close does not close the underlying reader.
+func (f *FileReader) Close() error {
+ if f.footer.data != nil {
+ f.footer.data = nil
+ }
+
+ if f.footer.buffer != nil {
+ f.footer.buffer.Release()
+ f.footer.buffer = nil
+ }
+
+ if f.record != nil {
+ f.record.Release()
+ f.record = nil
+ }
+ return nil
+}
+
+// Record returns the i-th record from the file.
+// The returned value is valid until the next call to Record.
+// Users need to call Retain on that Record to keep it valid for longer.
+func (f *FileReader) Record(i int) (arrow.Record, error) {
+ record, err := f.RecordAt(i)
+ if err != nil {
+ return nil, err
+ }
+
+ if f.record != nil {
+ f.record.Release()
+ }
+
+ f.record = record
+ return record, nil
+}
+
+// RecordAt returns the i-th record from the file. Ownership is transferred to
+// the caller, which must call Release() to free the memory. This method is safe to
+// call concurrently.
+func (f *FileReader) RecordAt(i int) (arrow.Record, error) {
+ if i < 0 || i > f.NumRecords() {
+ panic("arrow/ipc: record index out of bounds")
+ }
+
+ blk, err := f.block(i)
+ if err != nil {
+ return nil, err
+ }
+ switch {
+ case !bitutil.IsMultipleOf8(blk.Offset):
+ return nil, fmt.Errorf("arrow/ipc: invalid file offset=%d for record %d", blk.Offset, i)
+ case !bitutil.IsMultipleOf8(int64(blk.Meta)):
+ return nil, fmt.Errorf("arrow/ipc: invalid file metadata=%d position for record %d", blk.Meta, i)
+ case !bitutil.IsMultipleOf8(blk.Body):
+ return nil, fmt.Errorf("arrow/ipc: invalid file body=%d position for record %d", blk.Body, i)
+ }
+
+ msg, err := blk.NewMessage()
+ if err != nil {
+ return nil, err
+ }
+ defer msg.Release()
+
+ if msg.Type() != MessageRecordBatch {
+ return nil, fmt.Errorf("arrow/ipc: message %d is not a Record", i)
+ }
+
+ return newRecord(f.schema, &f.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), f.swapEndianness, f.mem), nil
+}
+
+// Read reads the current record from the underlying stream and an error, if any.
+// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF).
+//
+// The returned record value is valid until the next call to Read.
+// Users need to call Retain on that Record to keep it valid for longer.
+func (f *FileReader) Read() (rec arrow.Record, err error) {
+ if f.irec == f.NumRecords() {
+ return nil, io.EOF
+ }
+ rec, f.err = f.Record(f.irec)
+ f.irec++
+ return rec, f.err
+}
+
+// ReadAt reads the i-th record from the underlying stream and an error, if any.
+func (f *FileReader) ReadAt(i int64) (arrow.Record, error) {
+ return f.Record(int(i))
+}
+
+func newRecord(schema *arrow.Schema, memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker, swapEndianness bool, mem memory.Allocator) arrow.Record {
+ var (
+ msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0)
+ md flatbuf.RecordBatch
+ codec decompressor
+ )
+ initFB(&md, msg.Header)
+ rows := md.Length()
+
+ bodyCompress := md.Compression(nil)
+ if bodyCompress != nil {
+ codec = getDecompressor(bodyCompress.Codec())
+ defer codec.Close()
+ }
+
+ ctx := &arrayLoaderContext{
+ src: ipcSource{
+ meta: &md,
+ r: body,
+ codec: codec,
+ mem: mem,
+ },
+ memo: memo,
+ max: kMaxNestingDepth,
+ version: MetadataVersion(msg.Version()),
+ }
+
+ pos := dictutils.NewFieldPos()
+ cols := make([]arrow.Array, len(schema.Fields()))
+ for i, field := range schema.Fields() {
+ data := ctx.loadArray(field.Type)
+ defer data.Release()
+
+ if err := dictutils.ResolveFieldDict(memo, data, pos.Child(int32(i)), mem); err != nil {
+ panic(err)
+ }
+
+ if swapEndianness {
+ swapEndianArrayData(data.(*array.Data))
+ }
+
+ cols[i] = array.MakeFromData(data)
+ defer cols[i].Release()
+ }
+
+ return array.NewRecord(schema, cols, rows)
+}
+
+type ipcSource struct {
+ meta *flatbuf.RecordBatch
+ r ReadAtSeeker
+ codec decompressor
+ mem memory.Allocator
+}
+
+func (src *ipcSource) buffer(i int) *memory.Buffer {
+ var buf flatbuf.Buffer
+ if !src.meta.Buffers(&buf, i) {
+ panic("arrow/ipc: buffer index out of bound")
+ }
+
+ if buf.Length() == 0 {
+ return memory.NewBufferBytes(nil)
+ }
+
+ raw := memory.NewResizableBuffer(src.mem)
+ if src.codec == nil {
+ raw.Resize(int(buf.Length()))
+ _, err := src.r.ReadAt(raw.Bytes(), buf.Offset())
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ sr := io.NewSectionReader(src.r, buf.Offset(), buf.Length())
+ var uncompressedSize uint64
+
+ err := binary.Read(sr, binary.LittleEndian, &uncompressedSize)
+ if err != nil {
+ panic(err)
+ }
+
+ var r io.Reader = sr
+ // check for an uncompressed buffer
+ if int64(uncompressedSize) != -1 {
+ raw.Resize(int(uncompressedSize))
+ src.codec.Reset(sr)
+ r = src.codec
+ } else {
+ raw.Resize(int(buf.Length() - 8))
+ }
+
+ if _, err = io.ReadFull(r, raw.Bytes()); err != nil {
+ panic(err)
+ }
+ }
+
+ return raw
+}
+
+func (src *ipcSource) fieldMetadata(i int) *flatbuf.FieldNode {
+ var node flatbuf.FieldNode
+ if !src.meta.Nodes(&node, i) {
+ panic("arrow/ipc: field metadata out of bound")
+ }
+ return &node
+}
+
+type arrayLoaderContext struct {
+ src ipcSource
+ ifield int
+ ibuffer int
+ max int
+ memo *dictutils.Memo
+ version MetadataVersion
+}
+
+func (ctx *arrayLoaderContext) field() *flatbuf.FieldNode {
+ field := ctx.src.fieldMetadata(ctx.ifield)
+ ctx.ifield++
+ return field
+}
+
+func (ctx *arrayLoaderContext) buffer() *memory.Buffer {
+ buf := ctx.src.buffer(ctx.ibuffer)
+ ctx.ibuffer++
+ return buf
+}
+
+func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData {
+ switch dt := dt.(type) {
+ case *arrow.NullType:
+ return ctx.loadNull()
+
+ case *arrow.DictionaryType:
+ indices := ctx.loadPrimitive(dt.IndexType)
+ defer indices.Release()
+ return array.NewData(dt, indices.Len(), indices.Buffers(), indices.Children(), indices.NullN(), indices.Offset())
+
+ case *arrow.BooleanType,
+ *arrow.Int8Type, *arrow.Int16Type, *arrow.Int32Type, *arrow.Int64Type,
+ *arrow.Uint8Type, *arrow.Uint16Type, *arrow.Uint32Type, *arrow.Uint64Type,
+ *arrow.Float16Type, *arrow.Float32Type, *arrow.Float64Type,
+ *arrow.Decimal128Type, *arrow.Decimal256Type,
+ *arrow.Time32Type, *arrow.Time64Type,
+ *arrow.TimestampType,
+ *arrow.Date32Type, *arrow.Date64Type,
+ *arrow.MonthIntervalType, *arrow.DayTimeIntervalType, *arrow.MonthDayNanoIntervalType,
+ *arrow.DurationType:
+ return ctx.loadPrimitive(dt)
+
+ case *arrow.BinaryType, *arrow.StringType, *arrow.LargeStringType, *arrow.LargeBinaryType:
+ return ctx.loadBinary(dt)
+
+ case *arrow.FixedSizeBinaryType:
+ return ctx.loadFixedSizeBinary(dt)
+
+ case *arrow.ListType:
+ return ctx.loadList(dt)
+
+ case *arrow.LargeListType:
+ return ctx.loadList(dt)
+
+ case *arrow.ListViewType:
+ return ctx.loadListView(dt)
+
+ case *arrow.LargeListViewType:
+ return ctx.loadListView(dt)
+
+ case *arrow.FixedSizeListType:
+ return ctx.loadFixedSizeList(dt)
+
+ case *arrow.StructType:
+ return ctx.loadStruct(dt)
+
+ case *arrow.MapType:
+ return ctx.loadMap(dt)
+
+ case arrow.ExtensionType:
+ storage := ctx.loadArray(dt.StorageType())
+ defer storage.Release()
+ return array.NewData(dt, storage.Len(), storage.Buffers(), storage.Children(), storage.NullN(), storage.Offset())
+
+ case *arrow.RunEndEncodedType:
+ field, buffers := ctx.loadCommon(dt.ID(), 1)
+ defer releaseBuffers(buffers)
+
+ runEnds := ctx.loadChild(dt.RunEnds())
+ defer runEnds.Release()
+ values := ctx.loadChild(dt.Encoded())
+ defer values.Release()
+
+ return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{runEnds, values}, int(field.NullCount()), 0)
+
+ case arrow.UnionType:
+ return ctx.loadUnion(dt)
+
+ default:
+ panic(fmt.Errorf("arrow/ipc: array type %T not handled yet", dt))
+ }
+}
+
+func (ctx *arrayLoaderContext) loadCommon(typ arrow.Type, nbufs int) (*flatbuf.FieldNode, []*memory.Buffer) {
+ buffers := make([]*memory.Buffer, 0, nbufs)
+ field := ctx.field()
+
+ var buf *memory.Buffer
+
+ if internal.HasValidityBitmap(typ, flatbuf.MetadataVersion(ctx.version)) {
+ switch field.NullCount() {
+ case 0:
+ ctx.ibuffer++
+ default:
+ buf = ctx.buffer()
+ }
+ }
+ buffers = append(buffers, buf)
+
+ return field, buffers
+}
+
+func (ctx *arrayLoaderContext) loadChild(dt arrow.DataType) arrow.ArrayData {
+ if ctx.max == 0 {
+ panic("arrow/ipc: nested type limit reached")
+ }
+ ctx.max--
+ sub := ctx.loadArray(dt)
+ ctx.max++
+ return sub
+}
+
+func (ctx *arrayLoaderContext) loadNull() arrow.ArrayData {
+ field := ctx.field()
+ return array.NewData(arrow.Null, int(field.Length()), nil, nil, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadPrimitive(dt arrow.DataType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 2)
+
+ switch field.Length() {
+ case 0:
+ buffers = append(buffers, nil)
+ ctx.ibuffer++
+ default:
+ buffers = append(buffers, ctx.buffer())
+ }
+
+ defer releaseBuffers(buffers)
+
+ return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadBinary(dt arrow.DataType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 3)
+ buffers = append(buffers, ctx.buffer(), ctx.buffer())
+ defer releaseBuffers(buffers)
+
+ return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadFixedSizeBinary(dt *arrow.FixedSizeBinaryType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 2)
+ buffers = append(buffers, ctx.buffer())
+ defer releaseBuffers(buffers)
+
+ return array.NewData(dt, int(field.Length()), buffers, nil, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadMap(dt *arrow.MapType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 2)
+ buffers = append(buffers, ctx.buffer())
+ defer releaseBuffers(buffers)
+
+ sub := ctx.loadChild(dt.Elem())
+ defer sub.Release()
+
+ return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadList(dt arrow.ListLikeType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 2)
+ buffers = append(buffers, ctx.buffer())
+ defer releaseBuffers(buffers)
+
+ sub := ctx.loadChild(dt.Elem())
+ defer sub.Release()
+
+ return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadListView(dt arrow.VarLenListLikeType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 3)
+ buffers = append(buffers, ctx.buffer(), ctx.buffer())
+ defer releaseBuffers(buffers)
+
+ sub := ctx.loadChild(dt.Elem())
+ defer sub.Release()
+
+ return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 1)
+ defer releaseBuffers(buffers)
+
+ sub := ctx.loadChild(dt.Elem())
+ defer sub.Release()
+
+ return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadStruct(dt *arrow.StructType) arrow.ArrayData {
+ field, buffers := ctx.loadCommon(dt.ID(), 1)
+ defer releaseBuffers(buffers)
+
+ subs := make([]arrow.ArrayData, len(dt.Fields()))
+ for i, f := range dt.Fields() {
+ subs[i] = ctx.loadChild(f.Type)
+ }
+ defer func() {
+ for i := range subs {
+ subs[i].Release()
+ }
+ }()
+
+ return array.NewData(dt, int(field.Length()), buffers, subs, int(field.NullCount()), 0)
+}
+
+func (ctx *arrayLoaderContext) loadUnion(dt arrow.UnionType) arrow.ArrayData {
+ // Sparse unions have 2 buffers (a nil validity bitmap, and the type ids)
+ nBuffers := 2
+ // Dense unions have a third buffer, the offsets
+ if dt.Mode() == arrow.DenseMode {
+ nBuffers = 3
+ }
+
+ field, buffers := ctx.loadCommon(dt.ID(), nBuffers)
+ if field.NullCount() != 0 && buffers[0] != nil {
+ panic("arrow/ipc: cannot read pre-1.0.0 union array with top-level validity bitmap")
+ }
+
+ switch field.Length() {
+ case 0:
+ buffers = append(buffers, memory.NewBufferBytes([]byte{}))
+ ctx.ibuffer++
+ if dt.Mode() == arrow.DenseMode {
+ buffers = append(buffers, nil)
+ ctx.ibuffer++
+ }
+ default:
+ buffers = append(buffers, ctx.buffer())
+ if dt.Mode() == arrow.DenseMode {
+ buffers = append(buffers, ctx.buffer())
+ }
+ }
+
+ defer releaseBuffers(buffers)
+ subs := make([]arrow.ArrayData, len(dt.Fields()))
+ for i, f := range dt.Fields() {
+ subs[i] = ctx.loadChild(f.Type)
+ }
+ defer func() {
+ for i := range subs {
+ subs[i].Release()
+ }
+ }()
+ return array.NewData(dt, int(field.Length()), buffers, subs, 0, 0)
+}
+
+func readDictionary(memo *dictutils.Memo, meta *memory.Buffer, body ReadAtSeeker, swapEndianness bool, mem memory.Allocator) (dictutils.Kind, error) {
+ var (
+ msg = flatbuf.GetRootAsMessage(meta.Bytes(), 0)
+ md flatbuf.DictionaryBatch
+ data flatbuf.RecordBatch
+ codec decompressor
+ )
+ initFB(&md, msg.Header)
+
+ md.Data(&data)
+ bodyCompress := data.Compression(nil)
+ if bodyCompress != nil {
+ codec = getDecompressor(bodyCompress.Codec())
+ }
+
+ id := md.Id()
+ // look up the dictionary value type, which must have been added to the
+ // memo already before calling this function
+ valueType, ok := memo.Type(id)
+ if !ok {
+ return 0, fmt.Errorf("arrow/ipc: no dictionary type found with id: %d", id)
+ }
+
+ ctx := &arrayLoaderContext{
+ src: ipcSource{
+ meta: &data,
+ codec: codec,
+ r: body,
+ mem: mem,
+ },
+ memo: memo,
+ max: kMaxNestingDepth,
+ }
+
+ dict := ctx.loadArray(valueType)
+ defer dict.Release()
+
+ if swapEndianness {
+ swapEndianArrayData(dict.(*array.Data))
+ }
+
+ if md.IsDelta() {
+ memo.AddDelta(id, dict)
+ return dictutils.KindDelta, nil
+ }
+ if memo.AddOrReplace(id, dict) {
+ return dictutils.KindNew, nil
+ }
+ return dictutils.KindReplacement, nil
+}
+
+func releaseBuffers(buffers []*memory.Buffer) {
+ for _, b := range buffers {
+ if b != nil {
+ b.Release()
+ }
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go
new file mode 100644
index 000000000..12384225b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go
@@ -0,0 +1,394 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal/dictutils"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// PayloadWriter is an interface for injecting a different payloadwriter
+// allowing more reusability with the Writer object with other scenarios,
+// such as with Flight data
+type PayloadWriter interface {
+ Start() error
+ WritePayload(Payload) error
+ Close() error
+}
+
+type pwriter struct {
+ w io.WriteSeeker
+ pos int64
+
+ schema *arrow.Schema
+ dicts []fileBlock
+ recs []fileBlock
+}
+
+func (w *pwriter) Start() error {
+ var err error
+
+ err = w.updatePos()
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not update position while in start: %w", err)
+ }
+
+ // only necessary to align to 8-byte boundary at the start of the file
+ _, err = w.Write(Magic)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not write magic Arrow bytes: %w", err)
+ }
+
+ err = w.align(kArrowIPCAlignment)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not align start block: %w", err)
+ }
+
+ return err
+}
+
+func (w *pwriter) WritePayload(p Payload) error {
+ blk := fileBlock{Offset: w.pos, Meta: 0, Body: p.size}
+ n, err := writeIPCPayload(w, p)
+ if err != nil {
+ return err
+ }
+
+ blk.Meta = int32(n)
+
+ err = w.updatePos()
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not update position while in write-payload: %w", err)
+ }
+
+ switch flatbuf.MessageHeader(p.msg) {
+ case flatbuf.MessageHeaderDictionaryBatch:
+ w.dicts = append(w.dicts, blk)
+ case flatbuf.MessageHeaderRecordBatch:
+ w.recs = append(w.recs, blk)
+ }
+
+ return nil
+}
+
+func (w *pwriter) Close() error {
+ var err error
+
+ // write file footer
+ err = w.updatePos()
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not update position while in close: %w", err)
+ }
+
+ pos := w.pos
+ err = writeFileFooter(w.schema, w.dicts, w.recs, w)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not write file footer: %w", err)
+ }
+
+ // write file footer length
+ err = w.updatePos() // not strictly needed as we passed w to writeFileFooter...
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not compute file footer length: %w", err)
+ }
+
+ size := w.pos - pos
+ if size <= 0 {
+ return fmt.Errorf("arrow/ipc: invalid file footer size (size=%d)", size)
+ }
+
+ buf := make([]byte, 4)
+ binary.LittleEndian.PutUint32(buf, uint32(size))
+ _, err = w.Write(buf)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not write file footer size: %w", err)
+ }
+
+ _, err = w.Write(Magic)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not write Arrow magic bytes: %w", err)
+ }
+
+ return nil
+}
+
+func (w *pwriter) updatePos() error {
+ var err error
+ w.pos, err = w.w.Seek(0, io.SeekCurrent)
+ return err
+}
+
+func (w *pwriter) align(align int32) error {
+ remainder := paddedLength(w.pos, align) - w.pos
+ if remainder == 0 {
+ return nil
+ }
+
+ _, err := w.Write(paddingBytes[:int(remainder)])
+ return err
+}
+
+func (w *pwriter) Write(p []byte) (int, error) {
+ n, err := w.w.Write(p)
+ w.pos += int64(n)
+ return n, err
+}
+
+func writeIPCPayload(w io.Writer, p Payload) (int, error) {
+ n, err := writeMessage(p.meta, kArrowIPCAlignment, w)
+ if err != nil {
+ return n, err
+ }
+
+ // now write the buffers
+ for _, buf := range p.body {
+ var (
+ size int64
+ padding int64
+ )
+
+ // the buffer might be null if we are handling zero row lengths.
+ if buf != nil {
+ size = int64(buf.Len())
+ padding = bitutil.CeilByte64(size) - size
+ }
+
+ if size > 0 {
+ _, err = w.Write(buf.Bytes())
+ if err != nil {
+ return n, fmt.Errorf("arrow/ipc: could not write payload message body: %w", err)
+ }
+ }
+
+ if padding > 0 {
+ _, err = w.Write(paddingBytes[:padding])
+ if err != nil {
+ return n, fmt.Errorf("arrow/ipc: could not write payload message padding: %w", err)
+ }
+ }
+ }
+
+ return n, err
+}
+
+// Payload is the underlying message object which is passed to the payload writer
+// for actually writing out ipc messages
+type Payload struct {
+ msg MessageType
+ meta *memory.Buffer
+ body []*memory.Buffer
+ size int64 // length of body
+}
+
+// Meta returns the buffer containing the metadata for this payload,
+// callers must call Release on the buffer
+func (p *Payload) Meta() *memory.Buffer {
+ if p.meta != nil {
+ p.meta.Retain()
+ }
+ return p.meta
+}
+
+// SerializeBody serializes the body buffers and writes them to the provided
+// writer.
+func (p *Payload) SerializeBody(w io.Writer) error {
+ for _, data := range p.body {
+ if data == nil {
+ continue
+ }
+
+ size := int64(data.Len())
+ padding := bitutil.CeilByte64(size) - size
+ if size > 0 {
+ if _, err := w.Write(data.Bytes()); err != nil {
+ return fmt.Errorf("arrow/ipc: could not write payload message body: %w", err)
+ }
+
+ if padding > 0 {
+ if _, err := w.Write(paddingBytes[:padding]); err != nil {
+ return fmt.Errorf("arrow/ipc: could not write payload message padding bytes: %w", err)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (p *Payload) Release() {
+ if p.meta != nil {
+ p.meta.Release()
+ p.meta = nil
+ }
+ for i, b := range p.body {
+ if b == nil {
+ continue
+ }
+ b.Release()
+ p.body[i] = nil
+ }
+}
+
+type payloads []Payload
+
+func (ps payloads) Release() {
+ for i := range ps {
+ ps[i].Release()
+ }
+}
+
+// FileWriter is an Arrow file writer.
+type FileWriter struct {
+ w io.WriteSeeker
+
+ mem memory.Allocator
+
+ header struct {
+ started bool
+ offset int64
+ }
+
+ footer struct {
+ written bool
+ }
+
+ pw PayloadWriter
+
+ schema *arrow.Schema
+ mapper dictutils.Mapper
+ codec flatbuf.CompressionType
+ compressNP int
+ minSpaceSavings *float64
+
+ // map of the last written dictionaries by id
+ // so we can avoid writing the same dictionary over and over
+ // also needed for correctness when writing IPC format which
+ // does not allow replacements or deltas.
+ lastWrittenDicts map[int64]arrow.Array
+}
+
+// NewFileWriter opens an Arrow file using the provided writer w.
+func NewFileWriter(w io.WriteSeeker, opts ...Option) (*FileWriter, error) {
+ var (
+ cfg = newConfig(opts...)
+ err error
+ )
+
+ f := FileWriter{
+ w: w,
+ pw: &pwriter{w: w, schema: cfg.schema, pos: -1},
+ mem: cfg.alloc,
+ schema: cfg.schema,
+ codec: cfg.codec,
+ compressNP: cfg.compressNP,
+ minSpaceSavings: cfg.minSpaceSavings,
+ }
+
+ pos, err := f.w.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not seek current position: %w", err)
+ }
+ f.header.offset = pos
+
+ return &f, err
+}
+
+func (f *FileWriter) Close() error {
+ err := f.checkStarted()
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not write empty file: %w", err)
+ }
+
+ if f.footer.written {
+ return nil
+ }
+
+ err = f.pw.Close()
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err)
+ }
+ f.footer.written = true
+
+ return nil
+}
+
+func (f *FileWriter) Write(rec arrow.Record) error {
+ schema := rec.Schema()
+ if schema == nil || !schema.Equal(f.schema) {
+ return errInconsistentSchema
+ }
+
+ if err := f.checkStarted(); err != nil {
+ return fmt.Errorf("arrow/ipc: could not write header: %w", err)
+ }
+
+ const allow64b = true
+ var (
+ data = Payload{msg: MessageRecordBatch}
+ enc = newRecordEncoder(f.mem, 0, kMaxNestingDepth, allow64b, f.codec, f.compressNP, f.minSpaceSavings)
+ )
+ defer data.Release()
+
+ err := writeDictionaryPayloads(f.mem, rec, true, false, &f.mapper, f.lastWrittenDicts, f.pw, enc)
+ if err != nil {
+ return fmt.Errorf("arrow/ipc: failure writing dictionary batches: %w", err)
+ }
+
+ enc.reset()
+ if err := enc.Encode(&data, rec); err != nil {
+ return fmt.Errorf("arrow/ipc: could not encode record to payload: %w", err)
+ }
+
+ return f.pw.WritePayload(data)
+}
+
+func (f *FileWriter) checkStarted() error {
+ if !f.header.started {
+ return f.start()
+ }
+ return nil
+}
+
+func (f *FileWriter) start() error {
+ f.header.started = true
+ err := f.pw.Start()
+ if err != nil {
+ return err
+ }
+
+ f.mapper.ImportSchema(f.schema)
+ f.lastWrittenDicts = make(map[int64]arrow.Array)
+
+ // write out schema payloads
+ ps := payloadFromSchema(f.schema, f.mem, &f.mapper)
+ defer ps.Release()
+
+ for _, data := range ps {
+ err = f.pw.WritePayload(data)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go
new file mode 100644
index 000000000..6c04b6f5a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go
@@ -0,0 +1,199 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "io"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/arrio"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+const (
+ errNotArrowFile = errString("arrow/ipc: not an Arrow file")
+ errInconsistentFileMetadata = errString("arrow/ipc: file is smaller than indicated metadata size")
+ errInconsistentSchema = errString("arrow/ipc: tried to write record batch with different schema")
+ errMaxRecursion = errString("arrow/ipc: max recursion depth reached")
+ errBigArray = errString("arrow/ipc: array larger than 2^31-1 in length")
+
+ kArrowAlignment = 64 // buffers are padded to 64b boundaries (for SIMD)
+ kTensorAlignment = 64 // tensors are padded to 64b boundaries
+ kArrowIPCAlignment = 8 // align on 8b boundaries in IPC
+)
+
+var (
+ paddingBytes [kArrowAlignment]byte
+ kEOS = [8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0, 0, 0, 0} // end of stream message
+ kIPCContToken uint32 = 0xFFFFFFFF // 32b continuation indicator for FlatBuffers 8b alignment
+)
+
+func paddedLength(nbytes int64, alignment int32) int64 {
+ align := int64(alignment)
+ return ((nbytes + align - 1) / align) * align
+}
+
+type errString string
+
+func (s errString) Error() string {
+ return string(s)
+}
+
+type ReadAtSeeker interface {
+ io.Reader
+ io.Seeker
+ io.ReaderAt
+}
+
+type config struct {
+ alloc memory.Allocator
+ schema *arrow.Schema
+ footer struct {
+ offset int64
+ }
+ codec flatbuf.CompressionType
+ compressNP int
+ ensureNativeEndian bool
+ noAutoSchema bool
+ emitDictDeltas bool
+ minSpaceSavings *float64
+}
+
+func newConfig(opts ...Option) *config {
+ cfg := &config{
+ alloc: memory.NewGoAllocator(),
+ codec: -1, // uncompressed
+ ensureNativeEndian: true,
+ }
+
+ for _, opt := range opts {
+ opt(cfg)
+ }
+
+ return cfg
+}
+
+// Option is a functional option to configure opening or creating Arrow files
+// and streams.
+type Option func(*config)
+
+// WithFooterOffset specifies the Arrow footer position in bytes.
+func WithFooterOffset(offset int64) Option {
+ return func(cfg *config) {
+ cfg.footer.offset = offset
+ }
+}
+
+// WithAllocator specifies the Arrow memory allocator used while building records.
+func WithAllocator(mem memory.Allocator) Option {
+ return func(cfg *config) {
+ cfg.alloc = mem
+ }
+}
+
+// WithSchema specifies the Arrow schema to be used for reading or writing.
+func WithSchema(schema *arrow.Schema) Option {
+ return func(cfg *config) {
+ cfg.schema = schema
+ }
+}
+
+// WithLZ4 tells the writer to use LZ4 Frame compression on the data
+// buffers before writing. Requires >= Arrow 1.0.0 to read/decompress
+func WithLZ4() Option {
+ return func(cfg *config) {
+ cfg.codec = flatbuf.CompressionTypeLZ4_FRAME
+ }
+}
+
+// WithZstd tells the writer to use ZSTD compression on the data
+// buffers before writing. Requires >= Arrow 1.0.0 to read/decompress
+func WithZstd() Option {
+ return func(cfg *config) {
+ cfg.codec = flatbuf.CompressionTypeZSTD
+ }
+}
+
+// WithCompressConcurrency specifies a number of goroutines to spin up for
+// concurrent compression of the body buffers when writing compress IPC records.
+// If n <= 1 then compression will be done serially without goroutine
+// parallelization. Default is 0.
+func WithCompressConcurrency(n int) Option {
+ return func(cfg *config) {
+ cfg.compressNP = n
+ }
+}
+
+// WithEnsureNativeEndian specifies whether or not to automatically byte-swap
+// buffers with endian-sensitive data if the schema's endianness is not the
+// platform-native endianness. This includes all numeric types, temporal types,
+// decimal types, as well as the offset buffers of variable-sized binary and
+// list-like types.
+//
+// This is only relevant to ipc Reader objects, not to writers. This defaults
+// to true.
+func WithEnsureNativeEndian(v bool) Option {
+ return func(cfg *config) {
+ cfg.ensureNativeEndian = v
+ }
+}
+
+// WithDelayedReadSchema alters the ipc.Reader behavior to delay attempting
+// to read the schema from the stream until the first call to Next instead
+// of immediately attempting to read a schema from the stream when created.
+func WithDelayReadSchema(v bool) Option {
+ return func(cfg *config) {
+ cfg.noAutoSchema = v
+ }
+}
+
+// WithDictionaryDeltas specifies whether or not to emit dictionary deltas.
+func WithDictionaryDeltas(v bool) Option {
+ return func(cfg *config) {
+ cfg.emitDictDeltas = v
+ }
+}
+
+// WithMinSpaceSavings specifies a percentage of space savings for
+// compression to be applied to buffers.
+//
+// Space savings is calculated as (1.0 - compressedSize / uncompressedSize).
+//
+// For example, if minSpaceSavings = 0.1, a 100-byte body buffer won't
+// undergo compression if its expected compressed size exceeds 90 bytes.
+// If this option is unset, compression will be used indiscriminately. If
+// no codec was supplied, this option is ignored.
+//
+// Values outside of the range [0,1] are handled as errors.
+//
+// Note that enabling this option may result in unreadable data for Arrow
+// Go and C++ versions prior to 12.0.0.
+func WithMinSpaceSavings(savings float64) Option {
+ return func(cfg *config) {
+ cfg.minSpaceSavings = &savings
+ }
+}
+
+var (
+ _ arrio.Reader = (*Reader)(nil)
+ _ arrio.Writer = (*Writer)(nil)
+ _ arrio.Reader = (*FileReader)(nil)
+ _ arrio.Writer = (*FileWriter)(nil)
+
+ _ arrio.ReaderAt = (*FileReader)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go
new file mode 100644
index 000000000..c5d0ec68d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go
@@ -0,0 +1,242 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// MetadataVersion represents the Arrow metadata version.
+type MetadataVersion flatbuf.MetadataVersion
+
+const (
+ MetadataV1 = MetadataVersion(flatbuf.MetadataVersionV1) // version for Arrow-0.1.0
+ MetadataV2 = MetadataVersion(flatbuf.MetadataVersionV2) // version for Arrow-0.2.0
+ MetadataV3 = MetadataVersion(flatbuf.MetadataVersionV3) // version for Arrow-0.3.0 to 0.7.1
+ MetadataV4 = MetadataVersion(flatbuf.MetadataVersionV4) // version for >= Arrow-0.8.0
+ MetadataV5 = MetadataVersion(flatbuf.MetadataVersionV5) // version for >= Arrow-1.0.0, backward compatible with v4
+)
+
+func (m MetadataVersion) String() string {
+ if v, ok := flatbuf.EnumNamesMetadataVersion[flatbuf.MetadataVersion(m)]; ok {
+ return v
+ }
+ return fmt.Sprintf("MetadataVersion(%d)", int16(m))
+}
+
+// MessageType represents the type of Message in an Arrow format.
+type MessageType flatbuf.MessageHeader
+
+const (
+ MessageNone = MessageType(flatbuf.MessageHeaderNONE)
+ MessageSchema = MessageType(flatbuf.MessageHeaderSchema)
+ MessageDictionaryBatch = MessageType(flatbuf.MessageHeaderDictionaryBatch)
+ MessageRecordBatch = MessageType(flatbuf.MessageHeaderRecordBatch)
+ MessageTensor = MessageType(flatbuf.MessageHeaderTensor)
+ MessageSparseTensor = MessageType(flatbuf.MessageHeaderSparseTensor)
+)
+
+func (m MessageType) String() string {
+ if v, ok := flatbuf.EnumNamesMessageHeader[flatbuf.MessageHeader(m)]; ok {
+ return v
+ }
+ return fmt.Sprintf("MessageType(%d)", int(m))
+}
+
+// Message is an IPC message, including metadata and body.
+type Message struct {
+ refCount int64
+ msg *flatbuf.Message
+ meta *memory.Buffer
+ body *memory.Buffer
+}
+
+// NewMessage creates a new message from the metadata and body buffers.
+// NewMessage panics if any of these buffers is nil.
+func NewMessage(meta, body *memory.Buffer) *Message {
+ if meta == nil || body == nil {
+ panic("arrow/ipc: nil buffers")
+ }
+ meta.Retain()
+ body.Retain()
+ return &Message{
+ refCount: 1,
+ msg: flatbuf.GetRootAsMessage(meta.Bytes(), 0),
+ meta: meta,
+ body: body,
+ }
+}
+
+func newMessageFromFB(meta *flatbuf.Message, body *memory.Buffer) *Message {
+ if meta == nil || body == nil {
+ panic("arrow/ipc: nil buffers")
+ }
+ body.Retain()
+ return &Message{
+ refCount: 1,
+ msg: meta,
+ meta: memory.NewBufferBytes(meta.Table().Bytes),
+ body: body,
+ }
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (msg *Message) Retain() {
+ atomic.AddInt64(&msg.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// Release may be called simultaneously from multiple goroutines.
+// When the reference count goes to zero, the memory is freed.
+func (msg *Message) Release() {
+ debug.Assert(atomic.LoadInt64(&msg.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&msg.refCount, -1) == 0 {
+ msg.meta.Release()
+ msg.body.Release()
+ msg.msg = nil
+ msg.meta = nil
+ msg.body = nil
+ }
+}
+
+func (msg *Message) Version() MetadataVersion {
+ return MetadataVersion(msg.msg.Version())
+}
+
+func (msg *Message) Type() MessageType {
+ return MessageType(msg.msg.HeaderType())
+}
+
+func (msg *Message) BodyLen() int64 {
+ return msg.msg.BodyLength()
+}
+
+type MessageReader interface {
+ Message() (*Message, error)
+ Release()
+ Retain()
+}
+
+// MessageReader reads messages from an io.Reader.
+type messageReader struct {
+ r io.Reader
+
+ refCount int64
+ msg *Message
+
+ mem memory.Allocator
+}
+
+// NewMessageReader returns a reader that reads messages from an input stream.
+func NewMessageReader(r io.Reader, opts ...Option) MessageReader {
+ cfg := newConfig()
+ for _, opt := range opts {
+ opt(cfg)
+ }
+
+ return &messageReader{r: r, refCount: 1, mem: cfg.alloc}
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (r *messageReader) Retain() {
+ atomic.AddInt64(&r.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (r *messageReader) Release() {
+ debug.Assert(atomic.LoadInt64(&r.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&r.refCount, -1) == 0 {
+ if r.msg != nil {
+ r.msg.Release()
+ r.msg = nil
+ }
+ }
+}
+
+// Message returns the current message that has been extracted from the
+// underlying stream.
+// It is valid until the next call to Message.
+func (r *messageReader) Message() (*Message, error) {
+ var buf = make([]byte, 4)
+ _, err := io.ReadFull(r.r, buf)
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not read continuation indicator: %w", err)
+ }
+ var (
+ cid = binary.LittleEndian.Uint32(buf)
+ msgLen int32
+ )
+ switch cid {
+ case 0:
+ // EOS message.
+ return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error?
+ case kIPCContToken:
+ _, err = io.ReadFull(r.r, buf)
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not read message length: %w", err)
+ }
+ msgLen = int32(binary.LittleEndian.Uint32(buf))
+ if msgLen == 0 {
+ // optional 0 EOS control message
+ return nil, io.EOF // FIXME(sbinet): send nil instead? or a special EOS error?
+ }
+
+ default:
+ // ARROW-6314: backwards compatibility for reading old IPC
+ // messages produced prior to version 0.15.0
+ msgLen = int32(cid)
+ }
+
+ buf = make([]byte, msgLen)
+ _, err = io.ReadFull(r.r, buf)
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not read message metadata: %w", err)
+ }
+
+ meta := flatbuf.GetRootAsMessage(buf, 0)
+ bodyLen := meta.BodyLength()
+
+ body := memory.NewResizableBuffer(r.mem)
+ defer body.Release()
+ body.Resize(int(bodyLen))
+
+ _, err = io.ReadFull(r.r, body.Bytes())
+ if err != nil {
+ return nil, fmt.Errorf("arrow/ipc: could not read message body: %w", err)
+ }
+
+ if r.msg != nil {
+ r.msg.Release()
+ r.msg = nil
+ }
+ r.msg = newMessageFromFB(meta, body)
+
+ return r.msg, nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go
new file mode 100644
index 000000000..9bab47d6f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go
@@ -0,0 +1,1287 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/endian"
+ "github.com/apache/arrow/go/v14/arrow/internal/dictutils"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+ flatbuffers "github.com/google/flatbuffers/go"
+)
+
+// Magic string identifying an Apache Arrow file.
+var Magic = []byte("ARROW1")
+
+const (
+	// Metadata version written by this package (current) and the oldest
+	// version it accepts (min).
+	currentMetadataVersion = MetadataV5
+	minMetadataVersion = MetadataV4
+
+	// constants for the extension type metadata keys for the type name and
+	// any extension metadata to be passed to deserialize.
+	ExtensionTypeKeyName = "ARROW:extension:name"
+	ExtensionMetadataKeyName = "ARROW:extension:metadata"
+
+	// ARROW-109: We set this number arbitrarily to help catch user mistakes. For
+	// deeply nested schemas, it is expected the user will indicate explicitly the
+	// maximum allowed recursion depth
+	kMaxNestingDepth = 64
+)
+
+// startVecFunc starts a flatbuffers vector of n elements on the builder
+// (e.g. flatbuf.SchemaStartCustomMetadataVector).
+type startVecFunc func(b *flatbuffers.Builder, n int) flatbuffers.UOffsetT
+
+// fieldMetadata describes one array in a record batch: its logical
+// length, null count, and offset.
+type fieldMetadata struct {
+	Len int64
+	Nulls int64
+	Offset int64
+}
+
+// bufferMetadata locates one buffer within a message body.
+type bufferMetadata struct {
+	Offset int64 // relative offset into the memory page to the starting byte of the buffer
+	Len int64 // absolute length in bytes of the buffer
+}
+
+// fileBlock locates one encapsulated message inside an Arrow file:
+// Meta bytes of metadata followed by Body bytes of payload, starting at
+// Offset in the underlying reader r.
+type fileBlock struct {
+	Offset int64
+	Meta int32
+	Body int64
+
+	r io.ReaderAt
+	mem memory.Allocator
+}
+
+// fileBlocksToFB serializes blocks into a flatbuffers vector opened by
+// start, returning the offset of the finished vector. Flatbuffers
+// vectors are built back-to-front, hence the reverse iteration.
+func fileBlocksToFB(b *flatbuffers.Builder, blocks []fileBlock, start startVecFunc) flatbuffers.UOffsetT {
+	n := len(blocks)
+	start(b, n)
+	for idx := n - 1; idx >= 0; idx-- {
+		flatbuf.CreateBlock(b, blocks[idx].Offset, blocks[idx].Meta, blocks[idx].Body)
+	}
+	return b.EndVector(n)
+}
+
+// NewMessage reads this block's bytes from the file and builds a
+// Message from them. The caller is responsible for releasing the
+// returned message.
+func (blk fileBlock) NewMessage() (*Message, error) {
+	var (
+		err error
+		buf []byte
+		body *memory.Buffer
+		meta *memory.Buffer
+		r = blk.section()
+	)
+
+	// Read the metadata portion (blk.Meta bytes) of the block.
+	meta = memory.NewResizableBuffer(blk.mem)
+	meta.Resize(int(blk.Meta))
+	defer meta.Release()
+
+	buf = meta.Bytes()
+	_, err = io.ReadFull(r, buf)
+	if err != nil {
+		return nil, fmt.Errorf("arrow/ipc: could not read message metadata: %w", err)
+	}
+
+	// Determine how many framing bytes precede the flatbuffer: 8 for the
+	// continuation token + length, 4 for a bare legacy length, 0 otherwise.
+	prefix := 0
+	switch binary.LittleEndian.Uint32(buf) {
+	case 0:
+	case kIPCContToken:
+		prefix = 8
+	default:
+		// ARROW-6314: backwards compatibility for reading old IPC
+		// messages produced prior to version 0.15.0
+		prefix = 4
+	}
+
+	// drop buf-size already known from blk.Meta
+	// (SliceBuffer retains the parent, so releasing both is safe)
+	meta = memory.SliceBuffer(meta, prefix, int(blk.Meta)-prefix)
+	defer meta.Release()
+
+	// Read the body portion (blk.Body bytes) of the block.
+	body = memory.NewResizableBuffer(blk.mem)
+	defer body.Release()
+	body.Resize(int(blk.Body))
+	buf = body.Bytes()
+	_, err = io.ReadFull(r, buf)
+	if err != nil {
+		return nil, fmt.Errorf("arrow/ipc: could not read message body: %w", err)
+	}
+
+	// NewMessage retains meta and body; the deferred Releases only drop
+	// this function's references.
+	return NewMessage(meta, body), nil
+}
+
+// section returns a reader over this block's bytes (metadata followed
+// by body) within the underlying file.
+func (blk fileBlock) section() io.Reader {
+	size := int64(blk.Meta) + blk.Body
+	return io.NewSectionReader(blk.r, blk.Offset, size)
+}
+
+// unitFromFB converts a flatbuffers time unit into its arrow
+// equivalent, panicking on an unknown unit.
+func unitFromFB(unit flatbuf.TimeUnit) arrow.TimeUnit {
+	units := map[flatbuf.TimeUnit]arrow.TimeUnit{
+		flatbuf.TimeUnitSECOND:      arrow.Second,
+		flatbuf.TimeUnitMILLISECOND: arrow.Millisecond,
+		flatbuf.TimeUnitMICROSECOND: arrow.Microsecond,
+		flatbuf.TimeUnitNANOSECOND:  arrow.Nanosecond,
+	}
+	u, ok := units[unit]
+	if !ok {
+		panic(fmt.Errorf("arrow/ipc: invalid flatbuf.TimeUnit(%d) value", unit))
+	}
+	return u
+}
+
+// unitToFB converts an arrow time unit into its flatbuffers
+// equivalent, panicking on an unknown unit.
+func unitToFB(unit arrow.TimeUnit) flatbuf.TimeUnit {
+	units := map[arrow.TimeUnit]flatbuf.TimeUnit{
+		arrow.Second:      flatbuf.TimeUnitSECOND,
+		arrow.Millisecond: flatbuf.TimeUnitMILLISECOND,
+		arrow.Microsecond: flatbuf.TimeUnitMICROSECOND,
+		arrow.Nanosecond:  flatbuf.TimeUnitNANOSECOND,
+	}
+	u, ok := units[unit]
+	if !ok {
+		panic(fmt.Errorf("arrow/ipc: invalid arrow.TimeUnit(%d) value", unit))
+	}
+	return u
+}
+
+// initFB is a helper function to handle flatbuffers' polymorphism.
+// It resolves the union table via f (typically a *flatbuf accessor such
+// as Message.Header) and re-initializes t to point at that table,
+// panicking if the table cannot be loaded.
+func initFB(t interface {
+	Table() flatbuffers.Table
+	Init([]byte, flatbuffers.UOffsetT)
+}, f func(tbl *flatbuffers.Table) bool) {
+	tbl := t.Table()
+	if !f(&tbl) {
+		panic(fmt.Errorf("arrow/ipc: could not initialize %T from flatbuffer", t))
+	}
+	t.Init(tbl.Bytes, tbl.Pos)
+}
+
+// fieldFromFB converts a flatbuffers Field into an arrow.Field,
+// recursively converting its children. pos tracks the field's position
+// in the schema tree and memo records any dictionary encodings found.
+func fieldFromFB(field *flatbuf.Field, pos dictutils.FieldPos, memo *dictutils.Memo) (arrow.Field, error) {
+	var (
+		err error
+		o arrow.Field
+	)
+
+	o.Name = string(field.Name())
+	o.Nullable = field.Nullable()
+	o.Metadata, err = metadataFromFB(field)
+	if err != nil {
+		return o, err
+	}
+
+	// Convert children first: the parent type (list, struct, ...) is
+	// reconstructed from them below.
+	n := field.ChildrenLength()
+	children := make([]arrow.Field, n)
+	for i := range children {
+		var childFB flatbuf.Field
+		if !field.Children(&childFB, i) {
+			return o, fmt.Errorf("arrow/ipc: could not load field child %d", i)
+
+		}
+		child, err := fieldFromFB(&childFB, pos.Child(int32(i)), memo)
+		if err != nil {
+			return o, fmt.Errorf("arrow/ipc: could not convert field child %d: %w", i, err)
+		}
+		children[i] = child
+	}
+
+	// typeFromFB may also rewrite o.Metadata (stripping extension keys).
+	o.Type, err = typeFromFB(field, pos, children, &o.Metadata, memo)
+	if err != nil {
+		return o, fmt.Errorf("arrow/ipc: could not convert field type: %w", err)
+	}
+
+	return o, nil
+}
+
+func fieldToFB(b *flatbuffers.Builder, pos dictutils.FieldPos, field arrow.Field, memo *dictutils.Mapper) flatbuffers.UOffsetT {
+ var visitor = fieldVisitor{b: b, memo: memo, pos: pos, meta: make(map[string]string)}
+ return visitor.result(field)
+}
+
+// fieldVisitor accumulates the flatbuffers representation of one
+// arrow.Field: the type enum (dtype), the offset of the type table
+// (offset), offsets of already-serialized children (kids), and extra
+// key/value metadata (meta), e.g. extension-type entries.
+type fieldVisitor struct {
+	b *flatbuffers.Builder
+	memo *dictutils.Mapper
+	pos dictutils.FieldPos
+	dtype flatbuf.Type
+	offset flatbuffers.UOffsetT
+	kids []flatbuffers.UOffsetT
+	meta map[string]string
+}
+
+// visit records the flatbuffers type of field's data type into
+// fv.dtype/fv.offset, appending any serialized child fields to fv.kids.
+// Extension and dictionary types recurse on their storage/value type.
+func (fv *fieldVisitor) visit(field arrow.Field) {
+	dt := field.Type
+	switch dt := dt.(type) {
+	case *arrow.NullType:
+		fv.dtype = flatbuf.TypeNull
+		flatbuf.NullStart(fv.b)
+		fv.offset = flatbuf.NullEnd(fv.b)
+
+	case *arrow.BooleanType:
+		fv.dtype = flatbuf.TypeBool
+		flatbuf.BoolStart(fv.b)
+		fv.offset = flatbuf.BoolEnd(fv.b)
+
+	case *arrow.Uint8Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false)
+
+	case *arrow.Uint16Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false)
+
+	case *arrow.Uint32Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false)
+
+	case *arrow.Uint64Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), false)
+
+	case *arrow.Int8Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true)
+
+	case *arrow.Int16Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true)
+
+	case *arrow.Int32Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true)
+
+	case *arrow.Int64Type:
+		fv.dtype = flatbuf.TypeInt
+		fv.offset = intToFB(fv.b, int32(dt.BitWidth()), true)
+
+	case *arrow.Float16Type:
+		fv.dtype = flatbuf.TypeFloatingPoint
+		fv.offset = floatToFB(fv.b, int32(dt.BitWidth()))
+
+	case *arrow.Float32Type:
+		fv.dtype = flatbuf.TypeFloatingPoint
+		fv.offset = floatToFB(fv.b, int32(dt.BitWidth()))
+
+	case *arrow.Float64Type:
+		fv.dtype = flatbuf.TypeFloatingPoint
+		fv.offset = floatToFB(fv.b, int32(dt.BitWidth()))
+
+	case *arrow.Decimal128Type:
+		fv.dtype = flatbuf.TypeDecimal
+		flatbuf.DecimalStart(fv.b)
+		flatbuf.DecimalAddPrecision(fv.b, dt.Precision)
+		flatbuf.DecimalAddScale(fv.b, dt.Scale)
+		flatbuf.DecimalAddBitWidth(fv.b, 128)
+		fv.offset = flatbuf.DecimalEnd(fv.b)
+
+	case *arrow.Decimal256Type:
+		fv.dtype = flatbuf.TypeDecimal
+		flatbuf.DecimalStart(fv.b)
+		flatbuf.DecimalAddPrecision(fv.b, dt.Precision)
+		flatbuf.DecimalAddScale(fv.b, dt.Scale)
+		flatbuf.DecimalAddBitWidth(fv.b, 256)
+		fv.offset = flatbuf.DecimalEnd(fv.b)
+
+	case *arrow.FixedSizeBinaryType:
+		fv.dtype = flatbuf.TypeFixedSizeBinary
+		flatbuf.FixedSizeBinaryStart(fv.b)
+		flatbuf.FixedSizeBinaryAddByteWidth(fv.b, int32(dt.ByteWidth))
+		fv.offset = flatbuf.FixedSizeBinaryEnd(fv.b)
+
+	case *arrow.BinaryType:
+		fv.dtype = flatbuf.TypeBinary
+		flatbuf.BinaryStart(fv.b)
+		fv.offset = flatbuf.BinaryEnd(fv.b)
+
+	case *arrow.LargeBinaryType:
+		fv.dtype = flatbuf.TypeLargeBinary
+		flatbuf.LargeBinaryStart(fv.b)
+		fv.offset = flatbuf.LargeBinaryEnd(fv.b)
+
+	case *arrow.StringType:
+		fv.dtype = flatbuf.TypeUtf8
+		flatbuf.Utf8Start(fv.b)
+		fv.offset = flatbuf.Utf8End(fv.b)
+
+	case *arrow.LargeStringType:
+		fv.dtype = flatbuf.TypeLargeUtf8
+		flatbuf.LargeUtf8Start(fv.b)
+		fv.offset = flatbuf.LargeUtf8End(fv.b)
+
+	case *arrow.Date32Type:
+		fv.dtype = flatbuf.TypeDate
+		flatbuf.DateStart(fv.b)
+		flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitDAY)
+		fv.offset = flatbuf.DateEnd(fv.b)
+
+	case *arrow.Date64Type:
+		fv.dtype = flatbuf.TypeDate
+		flatbuf.DateStart(fv.b)
+		flatbuf.DateAddUnit(fv.b, flatbuf.DateUnitMILLISECOND)
+		fv.offset = flatbuf.DateEnd(fv.b)
+
+	case *arrow.Time32Type:
+		fv.dtype = flatbuf.TypeTime
+		flatbuf.TimeStart(fv.b)
+		flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit))
+		flatbuf.TimeAddBitWidth(fv.b, 32)
+		fv.offset = flatbuf.TimeEnd(fv.b)
+
+	case *arrow.Time64Type:
+		fv.dtype = flatbuf.TypeTime
+		flatbuf.TimeStart(fv.b)
+		flatbuf.TimeAddUnit(fv.b, unitToFB(dt.Unit))
+		flatbuf.TimeAddBitWidth(fv.b, 64)
+		fv.offset = flatbuf.TimeEnd(fv.b)
+
+	case *arrow.TimestampType:
+		fv.dtype = flatbuf.TypeTimestamp
+		// the timezone string must be created before TimestampStart:
+		// flatbuffers forbids nested object construction.
+		unit := unitToFB(dt.Unit)
+		var tz flatbuffers.UOffsetT
+		if dt.TimeZone != "" {
+			tz = fv.b.CreateString(dt.TimeZone)
+		}
+		flatbuf.TimestampStart(fv.b)
+		flatbuf.TimestampAddUnit(fv.b, unit)
+		flatbuf.TimestampAddTimezone(fv.b, tz)
+		fv.offset = flatbuf.TimestampEnd(fv.b)
+
+	case *arrow.StructType:
+		fv.dtype = flatbuf.TypeStruct_
+		offsets := make([]flatbuffers.UOffsetT, len(dt.Fields()))
+		for i, field := range dt.Fields() {
+			offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo)
+		}
+		flatbuf.Struct_Start(fv.b)
+		for i := len(offsets) - 1; i >= 0; i-- {
+			fv.b.PrependUOffsetT(offsets[i])
+		}
+		fv.offset = flatbuf.Struct_End(fv.b)
+		fv.kids = append(fv.kids, offsets...)
+
+	case *arrow.ListType:
+		fv.dtype = flatbuf.TypeList
+		fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo))
+		flatbuf.ListStart(fv.b)
+		fv.offset = flatbuf.ListEnd(fv.b)
+
+	case *arrow.LargeListType:
+		fv.dtype = flatbuf.TypeLargeList
+		fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo))
+		flatbuf.LargeListStart(fv.b)
+		fv.offset = flatbuf.LargeListEnd(fv.b)
+
+	case *arrow.ListViewType:
+		fv.dtype = flatbuf.TypeListView
+		fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo))
+		flatbuf.ListViewStart(fv.b)
+		fv.offset = flatbuf.ListViewEnd(fv.b)
+
+	case *arrow.LargeListViewType:
+		fv.dtype = flatbuf.TypeLargeListView
+		fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo))
+		flatbuf.LargeListViewStart(fv.b)
+		fv.offset = flatbuf.LargeListViewEnd(fv.b)
+
+	case *arrow.FixedSizeListType:
+		fv.dtype = flatbuf.TypeFixedSizeList
+		fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo))
+		flatbuf.FixedSizeListStart(fv.b)
+		flatbuf.FixedSizeListAddListSize(fv.b, dt.Len())
+		fv.offset = flatbuf.FixedSizeListEnd(fv.b)
+
+	case *arrow.MonthIntervalType:
+		fv.dtype = flatbuf.TypeInterval
+		flatbuf.IntervalStart(fv.b)
+		flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitYEAR_MONTH)
+		fv.offset = flatbuf.IntervalEnd(fv.b)
+
+	case *arrow.DayTimeIntervalType:
+		fv.dtype = flatbuf.TypeInterval
+		flatbuf.IntervalStart(fv.b)
+		flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitDAY_TIME)
+		fv.offset = flatbuf.IntervalEnd(fv.b)
+
+	case *arrow.MonthDayNanoIntervalType:
+		fv.dtype = flatbuf.TypeInterval
+		flatbuf.IntervalStart(fv.b)
+		flatbuf.IntervalAddUnit(fv.b, flatbuf.IntervalUnitMONTH_DAY_NANO)
+		fv.offset = flatbuf.IntervalEnd(fv.b)
+
+	case *arrow.DurationType:
+		fv.dtype = flatbuf.TypeDuration
+		unit := unitToFB(dt.Unit)
+		flatbuf.DurationStart(fv.b)
+		flatbuf.DurationAddUnit(fv.b, unit)
+		fv.offset = flatbuf.DurationEnd(fv.b)
+
+	case *arrow.MapType:
+		fv.dtype = flatbuf.TypeMap
+		fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo))
+		flatbuf.MapStart(fv.b)
+		flatbuf.MapAddKeysSorted(fv.b, dt.KeysSorted)
+		fv.offset = flatbuf.MapEnd(fv.b)
+
+	case *arrow.RunEndEncodedType:
+		fv.dtype = flatbuf.TypeRunEndEncoded
+		var offsets [2]flatbuffers.UOffsetT
+		offsets[0] = fieldToFB(fv.b, fv.pos.Child(0),
+			arrow.Field{Name: "run_ends", Type: dt.RunEnds()}, fv.memo)
+		offsets[1] = fieldToFB(fv.b, fv.pos.Child(1),
+			arrow.Field{Name: "values", Type: dt.Encoded(), Nullable: true}, fv.memo)
+		flatbuf.RunEndEncodedStart(fv.b)
+		fv.b.PrependUOffsetT(offsets[1])
+		fv.b.PrependUOffsetT(offsets[0])
+		fv.offset = flatbuf.RunEndEncodedEnd(fv.b)
+		fv.kids = append(fv.kids, offsets[0], offsets[1])
+
+	case arrow.ExtensionType:
+		// serialize the storage type and record the extension name and
+		// serialized form as custom metadata entries.
+		field.Type = dt.StorageType()
+		fv.visit(field)
+		fv.meta[ExtensionTypeKeyName] = dt.ExtensionName()
+		fv.meta[ExtensionMetadataKeyName] = string(dt.Serialize())
+
+	case *arrow.DictionaryType:
+		// serialize the value type; the encoding itself is emitted by
+		// fieldVisitor.result.
+		field.Type = dt.ValueType
+		fv.visit(field)
+
+	case arrow.UnionType:
+		fv.dtype = flatbuf.TypeUnion
+		offsets := make([]flatbuffers.UOffsetT, len(dt.Fields()))
+		for i, field := range dt.Fields() {
+			offsets[i] = fieldToFB(fv.b, fv.pos.Child(int32(i)), field, fv.memo)
+		}
+
+		codes := dt.TypeCodes()
+		flatbuf.UnionStartTypeIdsVector(fv.b, len(codes))
+
+		for i := len(codes) - 1; i >= 0; i-- {
+			fv.b.PlaceInt32(int32(codes[i]))
+		}
+		fbTypeIDs := fv.b.EndVector(len(dt.TypeCodes()))
+		flatbuf.UnionStart(fv.b)
+		switch dt.Mode() {
+		case arrow.SparseMode:
+			flatbuf.UnionAddMode(fv.b, flatbuf.UnionModeSparse)
+		case arrow.DenseMode:
+			flatbuf.UnionAddMode(fv.b, flatbuf.UnionModeDense)
+		default:
+			panic("invalid union mode")
+		}
+		flatbuf.UnionAddTypeIds(fv.b, fbTypeIDs)
+		fv.offset = flatbuf.UnionEnd(fv.b)
+		fv.kids = append(fv.kids, offsets...)
+
+	default:
+		err := fmt.Errorf("arrow/ipc: invalid data type %v", dt)
+		panic(err) // FIXME(sbinet): implement all data-types.
+	}
+}
+
+// result serializes field into the builder and returns the offset of
+// the finished flatbuffers Field table, including children, dictionary
+// encoding, and custom metadata.
+func (fv *fieldVisitor) result(field arrow.Field) flatbuffers.UOffsetT {
+	nameFB := fv.b.CreateString(field.Name)
+
+	fv.visit(field)
+
+	flatbuf.FieldStartChildrenVector(fv.b, len(fv.kids))
+	for i := len(fv.kids) - 1; i >= 0; i-- {
+		fv.b.PrependUOffsetT(fv.kids[i])
+	}
+	kidsFB := fv.b.EndVector(len(fv.kids))
+
+	// unwrap extension types so a dictionary-backed extension is still
+	// detected below.
+	storageType := field.Type
+	if storageType.ID() == arrow.EXTENSION {
+		storageType = storageType.(arrow.ExtensionType).StorageType()
+	}
+
+	var dictFB flatbuffers.UOffsetT
+	if storageType.ID() == arrow.DICTIONARY {
+		idxType := field.Type.(*arrow.DictionaryType).IndexType.(arrow.FixedWidthDataType)
+
+		// the dictionary id was assigned when the mapper imported the schema.
+		dictID, err := fv.memo.GetFieldID(fv.pos.Path())
+		if err != nil {
+			panic(err)
+		}
+		var signed bool
+		switch idxType.ID() {
+		case arrow.UINT8, arrow.UINT16, arrow.UINT32, arrow.UINT64:
+			signed = false
+		case arrow.INT8, arrow.INT16, arrow.INT32, arrow.INT64:
+			signed = true
+		}
+		indexTypeOffset := intToFB(fv.b, int32(idxType.BitWidth()), signed)
+		flatbuf.DictionaryEncodingStart(fv.b)
+		flatbuf.DictionaryEncodingAddId(fv.b, dictID)
+		flatbuf.DictionaryEncodingAddIndexType(fv.b, indexTypeOffset)
+		flatbuf.DictionaryEncodingAddIsOrdered(fv.b, field.Type.(*arrow.DictionaryType).Ordered)
+		dictFB = flatbuf.DictionaryEncodingEnd(fv.b)
+	}
+
+	// custom metadata: the field's own metadata first, then any entries
+	// gathered by visit (extension name/metadata) in sorted key order
+	// for deterministic output.
+	var (
+		metaFB flatbuffers.UOffsetT
+		kvs []flatbuffers.UOffsetT
+	)
+	for i, k := range field.Metadata.Keys() {
+		v := field.Metadata.Values()[i]
+		kk := fv.b.CreateString(k)
+		vv := fv.b.CreateString(v)
+		flatbuf.KeyValueStart(fv.b)
+		flatbuf.KeyValueAddKey(fv.b, kk)
+		flatbuf.KeyValueAddValue(fv.b, vv)
+		kvs = append(kvs, flatbuf.KeyValueEnd(fv.b))
+	}
+	{
+		keys := make([]string, 0, len(fv.meta))
+		for k := range fv.meta {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			v := fv.meta[k]
+			kk := fv.b.CreateString(k)
+			vv := fv.b.CreateString(v)
+			flatbuf.KeyValueStart(fv.b)
+			flatbuf.KeyValueAddKey(fv.b, kk)
+			flatbuf.KeyValueAddValue(fv.b, vv)
+			kvs = append(kvs, flatbuf.KeyValueEnd(fv.b))
+		}
+	}
+	if len(kvs) > 0 {
+		flatbuf.FieldStartCustomMetadataVector(fv.b, len(kvs))
+		for i := len(kvs) - 1; i >= 0; i-- {
+			fv.b.PrependUOffsetT(kvs[i])
+		}
+		metaFB = fv.b.EndVector(len(kvs))
+	}
+
+	flatbuf.FieldStart(fv.b)
+	flatbuf.FieldAddName(fv.b, nameFB)
+	flatbuf.FieldAddNullable(fv.b, field.Nullable)
+	flatbuf.FieldAddTypeType(fv.b, fv.dtype)
+	flatbuf.FieldAddType(fv.b, fv.offset)
+	flatbuf.FieldAddDictionary(fv.b, dictFB)
+	flatbuf.FieldAddChildren(fv.b, kidsFB)
+	flatbuf.FieldAddCustomMetadata(fv.b, metaFB)
+
+	offset := flatbuf.FieldEnd(fv.b)
+
+	return offset
+}
+
+// typeFromFB reconstructs the arrow data type of a flatbuffers Field,
+// wrapping it in a DictionaryType when the field carries a dictionary
+// encoding and resolving registered extension types from md. md may be
+// rewritten in place to strip the consumed extension metadata keys.
+func typeFromFB(field *flatbuf.Field, pos dictutils.FieldPos, children []arrow.Field, md *arrow.Metadata, memo *dictutils.Memo) (arrow.DataType, error) {
+	var data flatbuffers.Table
+	if !field.Type(&data) {
+		return nil, fmt.Errorf("arrow/ipc: could not load field type data")
+	}
+
+	dt, err := concreteTypeFromFB(field.TypeType(), data, children)
+	if err != nil {
+		return dt, err
+	}
+
+	var (
+		dictID = int64(-1)
+		dictValueType arrow.DataType
+		encoding = field.Dictionary(nil)
+	)
+	if encoding != nil {
+		var idt flatbuf.Int
+		encoding.IndexType(&idt)
+		idxType, err := intFromFB(idt)
+		if err != nil {
+			return nil, err
+		}
+
+		dictValueType = dt
+		dt = &arrow.DictionaryType{IndexType: idxType, ValueType: dictValueType, Ordered: encoding.IsOrdered()}
+		dictID = encoding.Id()
+
+		// record the dictionary id both by path (for writing) and by
+		// value type (for resolving dictionary batches).
+		if err = memo.Mapper.AddField(dictID, pos.Path()); err != nil {
+			return dt, err
+		}
+		if err = memo.AddType(dictID, dictValueType); err != nil {
+			return dt, err
+		}
+
+	}
+
+	// look for extension metadata in custom metadata field.
+	if md.Len() > 0 {
+		i := md.FindKey(ExtensionTypeKeyName)
+		if i < 0 {
+			return dt, err
+		}
+
+		extType := arrow.GetExtensionType(md.Values()[i])
+		if extType == nil {
+			// if the extension type is unknown, we do not error here.
+			// simply return the storage type.
+			return dt, err
+		}
+
+		var (
+			data string
+			dataIdx int
+		)
+
+		if dataIdx = md.FindKey(ExtensionMetadataKeyName); dataIdx >= 0 {
+			data = md.Values()[dataIdx]
+		}
+
+		dt, err = extType.Deserialize(dt, data)
+		if err != nil {
+			return dt, err
+		}
+
+		// NOTE(review): the append-based removal below aliases the
+		// original key/value slices — relies on Metadata accessors
+		// returning freely-mutable slices; confirm upstream.
+		mdkeys := md.Keys()
+		mdvals := md.Values()
+		if dataIdx < 0 {
+			// if there was no extension metadata, just the name, we only have to
+			// remove the extension name metadata key/value to ensure roundtrip
+			// metadata consistency
+			*md = arrow.NewMetadata(append(mdkeys[:i], mdkeys[i+1:]...), append(mdvals[:i], mdvals[i+1:]...))
+		} else {
+			// if there was extension metadata, we need to remove both the type name
+			// and the extension metadata keys and values.
+			newkeys := make([]string, 0, md.Len()-2)
+			newvals := make([]string, 0, md.Len()-2)
+			for j := range mdkeys {
+				if j != i && j != dataIdx { // copy everything except the extension metadata keys/values
+					newkeys = append(newkeys, mdkeys[j])
+					newvals = append(newvals, mdvals[j])
+				}
+			}
+			*md = arrow.NewMetadata(newkeys, newvals)
+		}
+	}
+
+	return dt, err
+}
+
+// concreteTypeFromFB maps a flatbuffers type enum plus its type table
+// (data) and already-converted children onto the concrete arrow data
+// type. It panics for enum values not yet implemented.
+func concreteTypeFromFB(typ flatbuf.Type, data flatbuffers.Table, children []arrow.Field) (arrow.DataType, error) {
+	switch typ {
+	case flatbuf.TypeNONE:
+		return nil, fmt.Errorf("arrow/ipc: Type metadata cannot be none")
+
+	case flatbuf.TypeNull:
+		return arrow.Null, nil
+
+	case flatbuf.TypeInt:
+		var dt flatbuf.Int
+		dt.Init(data.Bytes, data.Pos)
+		return intFromFB(dt)
+
+	case flatbuf.TypeFloatingPoint:
+		var dt flatbuf.FloatingPoint
+		dt.Init(data.Bytes, data.Pos)
+		return floatFromFB(dt)
+
+	case flatbuf.TypeDecimal:
+		var dt flatbuf.Decimal
+		dt.Init(data.Bytes, data.Pos)
+		return decimalFromFB(dt)
+
+	case flatbuf.TypeBinary:
+		return arrow.BinaryTypes.Binary, nil
+
+	case flatbuf.TypeFixedSizeBinary:
+		var dt flatbuf.FixedSizeBinary
+		dt.Init(data.Bytes, data.Pos)
+		return &arrow.FixedSizeBinaryType{ByteWidth: int(dt.ByteWidth())}, nil
+
+	case flatbuf.TypeUtf8:
+		return arrow.BinaryTypes.String, nil
+
+	case flatbuf.TypeLargeBinary:
+		return arrow.BinaryTypes.LargeBinary, nil
+
+	case flatbuf.TypeLargeUtf8:
+		return arrow.BinaryTypes.LargeString, nil
+
+	case flatbuf.TypeBool:
+		return arrow.FixedWidthTypes.Boolean, nil
+
+	case flatbuf.TypeList:
+		if len(children) != 1 {
+			return nil, fmt.Errorf("arrow/ipc: List must have exactly 1 child field (got=%d)", len(children))
+		}
+		dt := arrow.ListOfField(children[0])
+		return dt, nil
+
+	case flatbuf.TypeLargeList:
+		if len(children) != 1 {
+			return nil, fmt.Errorf("arrow/ipc: LargeList must have exactly 1 child field (got=%d)", len(children))
+		}
+		dt := arrow.LargeListOfField(children[0])
+		return dt, nil
+
+	case flatbuf.TypeListView:
+		if len(children) != 1 {
+			return nil, fmt.Errorf("arrow/ipc: ListView must have exactly 1 child field (got=%d)", len(children))
+		}
+		dt := arrow.ListViewOfField(children[0])
+		return dt, nil
+
+	case flatbuf.TypeLargeListView:
+		if len(children) != 1 {
+			return nil, fmt.Errorf("arrow/ipc: LargeListView must have exactly 1 child field (got=%d)", len(children))
+		}
+		dt := arrow.LargeListViewOfField(children[0])
+		return dt, nil
+
+	case flatbuf.TypeFixedSizeList:
+		var dt flatbuf.FixedSizeList
+		dt.Init(data.Bytes, data.Pos)
+		if len(children) != 1 {
+			return nil, fmt.Errorf("arrow/ipc: FixedSizeList must have exactly 1 child field (got=%d)", len(children))
+		}
+		ret := arrow.FixedSizeListOfField(dt.ListSize(), children[0])
+		return ret, nil
+
+	case flatbuf.TypeStruct_:
+		return arrow.StructOf(children...), nil
+
+	case flatbuf.TypeUnion:
+		var dt flatbuf.Union
+		dt.Init(data.Bytes, data.Pos)
+		var (
+			mode arrow.UnionMode
+			typeIDs []arrow.UnionTypeCode
+		)
+
+		switch dt.Mode() {
+		case flatbuf.UnionModeSparse:
+			mode = arrow.SparseMode
+		case flatbuf.UnionModeDense:
+			mode = arrow.DenseMode
+		}
+
+		typeIDLen := dt.TypeIdsLength()
+
+		// no explicit type ids: default to the children's ordinals.
+		if typeIDLen == 0 {
+			for i := range children {
+				typeIDs = append(typeIDs, int8(i))
+			}
+		} else {
+			for i := 0; i < typeIDLen; i++ {
+				id := dt.TypeIds(i)
+				code := arrow.UnionTypeCode(id)
+				if int32(code) != id {
+					return nil, errors.New("union type id out of bounds")
+				}
+				typeIDs = append(typeIDs, code)
+			}
+		}
+
+		return arrow.UnionOf(mode, children, typeIDs), nil
+
+	case flatbuf.TypeTime:
+		var dt flatbuf.Time
+		dt.Init(data.Bytes, data.Pos)
+		return timeFromFB(dt)
+
+	case flatbuf.TypeTimestamp:
+		var dt flatbuf.Timestamp
+		dt.Init(data.Bytes, data.Pos)
+		return timestampFromFB(dt)
+
+	case flatbuf.TypeDate:
+		var dt flatbuf.Date
+		dt.Init(data.Bytes, data.Pos)
+		return dateFromFB(dt)
+
+	case flatbuf.TypeInterval:
+		var dt flatbuf.Interval
+		dt.Init(data.Bytes, data.Pos)
+		return intervalFromFB(dt)
+
+	case flatbuf.TypeDuration:
+		var dt flatbuf.Duration
+		dt.Init(data.Bytes, data.Pos)
+		return durationFromFB(dt)
+
+	case flatbuf.TypeMap:
+		if len(children) != 1 {
+			return nil, fmt.Errorf("arrow/ipc: Map must have exactly 1 child field")
+		}
+
+		if children[0].Nullable || children[0].Type.ID() != arrow.STRUCT || len(children[0].Type.(*arrow.StructType).Fields()) != 2 {
+			return nil, fmt.Errorf("arrow/ipc: Map's key-item pairs must be non-nullable structs")
+		}
+
+		pairType := children[0].Type.(*arrow.StructType)
+		if pairType.Field(0).Nullable {
+			return nil, fmt.Errorf("arrow/ipc: Map's keys must be non-nullable")
+		}
+
+		var dt flatbuf.Map
+		dt.Init(data.Bytes, data.Pos)
+		ret := arrow.MapOf(pairType.Field(0).Type, pairType.Field(1).Type)
+		ret.SetItemNullable(pairType.Field(1).Nullable)
+		ret.KeysSorted = dt.KeysSorted()
+		return ret, nil
+
+	case flatbuf.TypeRunEndEncoded:
+		if len(children) != 2 {
+			return nil, fmt.Errorf("%w: arrow/ipc: RunEndEncoded must have exactly 2 child fields", arrow.ErrInvalid)
+		}
+		switch children[0].Type.ID() {
+		case arrow.INT16, arrow.INT32, arrow.INT64:
+		default:
+			return nil, fmt.Errorf("%w: arrow/ipc: run-end encoded run_ends field must be one of int16, int32, or int64 type", arrow.ErrInvalid)
+		}
+		return arrow.RunEndEncodedOf(children[0].Type, children[1].Type), nil
+
+	default:
+		panic(fmt.Errorf("arrow/ipc: type %v not implemented", flatbuf.EnumNamesType[typ]))
+	}
+}
+
+// intFromFB maps a flatbuffers Int type to the corresponding arrow
+// primitive integer type. Only 8/16/32/64-bit widths are supported.
+func intFromFB(data flatbuf.Int) (arrow.DataType, error) {
+	bw := data.BitWidth()
+	if bw > 64 {
+		return nil, fmt.Errorf("arrow/ipc: integers with more than 64 bits not implemented (bits=%d)", bw)
+	}
+	if bw < 8 {
+		return nil, fmt.Errorf("arrow/ipc: integers with less than 8 bits not implemented (bits=%d)", bw)
+	}
+
+	signed := data.IsSigned()
+	switch bw {
+	case 8:
+		if signed {
+			return arrow.PrimitiveTypes.Int8, nil
+		}
+		return arrow.PrimitiveTypes.Uint8, nil
+	case 16:
+		if signed {
+			return arrow.PrimitiveTypes.Int16, nil
+		}
+		return arrow.PrimitiveTypes.Uint16, nil
+	case 32:
+		if signed {
+			return arrow.PrimitiveTypes.Int32, nil
+		}
+		return arrow.PrimitiveTypes.Uint32, nil
+	case 64:
+		if signed {
+			return arrow.PrimitiveTypes.Int64, nil
+		}
+		return arrow.PrimitiveTypes.Uint64, nil
+	default:
+		// widths such as 24 or 48 bits fall through to here.
+		return nil, fmt.Errorf("arrow/ipc: integers not in cstdint are not implemented")
+	}
+}
+
+// intToFB builds a flatbuffers Int type table with the given bit width
+// and signedness, returning its offset in the builder.
+func intToFB(b *flatbuffers.Builder, bw int32, isSigned bool) flatbuffers.UOffsetT {
+	flatbuf.IntStart(b)
+	flatbuf.IntAddBitWidth(b, bw)
+	flatbuf.IntAddIsSigned(b, isSigned)
+	return flatbuf.IntEnd(b)
+}
+
+// floatFromFB maps a flatbuffers floating-point precision onto the
+// corresponding arrow type (Float16/Float32/Float64).
+func floatFromFB(data flatbuf.FloatingPoint) (arrow.DataType, error) {
+	types := map[flatbuf.Precision]arrow.DataType{
+		flatbuf.PrecisionHALF:   arrow.FixedWidthTypes.Float16,
+		flatbuf.PrecisionSINGLE: arrow.PrimitiveTypes.Float32,
+		flatbuf.PrecisionDOUBLE: arrow.PrimitiveTypes.Float64,
+	}
+	if dt, ok := types[data.Precision()]; ok {
+		return dt, nil
+	}
+	return nil, fmt.Errorf("arrow/ipc: floating point type with %d precision not implemented", data.Precision())
+}
+
+func floatToFB(b *flatbuffers.Builder, bw int32) flatbuffers.UOffsetT {
+ switch bw {
+ case 16:
+ flatbuf.FloatingPointStart(b)
+ flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionHALF)
+ return flatbuf.FloatingPointEnd(b)
+ case 32:
+ flatbuf.FloatingPointStart(b)
+ flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionSINGLE)
+ return flatbuf.FloatingPointEnd(b)
+ case 64:
+ flatbuf.FloatingPointStart(b)
+ flatbuf.FloatingPointAddPrecision(b, flatbuf.PrecisionDOUBLE)
+ return flatbuf.FloatingPointEnd(b)
+ default:
+ panic(fmt.Errorf("arrow/ipc: invalid floating point precision %d-bits", bw))
+ }
+}
+
+// decimalFromFB converts a flatbuffers Decimal into the matching arrow
+// decimal type (128- or 256-bit).
+func decimalFromFB(data flatbuf.Decimal) (arrow.DataType, error) {
+	prec, scale := data.Precision(), data.Scale()
+	switch bw := data.BitWidth(); bw {
+	case 128:
+		return &arrow.Decimal128Type{Precision: prec, Scale: scale}, nil
+	case 256:
+		return &arrow.Decimal256Type{Precision: prec, Scale: scale}, nil
+	default:
+		return nil, fmt.Errorf("arrow/ipc: invalid decimal bitwidth: %d", bw)
+	}
+}
+
+// timeFromFB converts a flatbuffers Time into the matching arrow time
+// type; 32-bit widths carry second/millisecond units, 64-bit widths
+// carry microsecond/nanosecond units.
+func timeFromFB(data flatbuf.Time) (arrow.DataType, error) {
+	unit := unitFromFB(data.Unit())
+	switch bw := data.BitWidth(); bw {
+	case 32:
+		if unit == arrow.Second {
+			return arrow.FixedWidthTypes.Time32s, nil
+		}
+		if unit == arrow.Millisecond {
+			return arrow.FixedWidthTypes.Time32ms, nil
+		}
+		return nil, fmt.Errorf("arrow/ipc: Time32 type with %v unit not implemented", unit)
+	case 64:
+		if unit == arrow.Microsecond {
+			return arrow.FixedWidthTypes.Time64us, nil
+		}
+		if unit == arrow.Nanosecond {
+			return arrow.FixedWidthTypes.Time64ns, nil
+		}
+		return nil, fmt.Errorf("arrow/ipc: Time64 type with %v unit not implemented", unit)
+	default:
+		return nil, fmt.Errorf("arrow/ipc: Time type with %d bitwidth not implemented", bw)
+	}
+}
+
+// timestampFromFB converts a flatbuffers Timestamp into an arrow
+// TimestampType, preserving its unit and timezone string.
+func timestampFromFB(data flatbuf.Timestamp) (arrow.DataType, error) {
+	return &arrow.TimestampType{
+		Unit:     unitFromFB(data.Unit()),
+		TimeZone: string(data.Timezone()),
+	}, nil
+}
+
+// dateFromFB converts a flatbuffers Date into arrow Date32 (days) or
+// Date64 (milliseconds).
+func dateFromFB(data flatbuf.Date) (arrow.DataType, error) {
+	unit := data.Unit()
+	if unit == flatbuf.DateUnitDAY {
+		return arrow.FixedWidthTypes.Date32, nil
+	}
+	if unit == flatbuf.DateUnitMILLISECOND {
+		return arrow.FixedWidthTypes.Date64, nil
+	}
+	return nil, fmt.Errorf("arrow/ipc: Date type with %d unit not implemented", unit)
+}
+
+// intervalFromFB converts a flatbuffers Interval unit into the matching
+// arrow interval type.
+func intervalFromFB(data flatbuf.Interval) (arrow.DataType, error) {
+	units := map[flatbuf.IntervalUnit]arrow.DataType{
+		flatbuf.IntervalUnitYEAR_MONTH:     arrow.FixedWidthTypes.MonthInterval,
+		flatbuf.IntervalUnitDAY_TIME:       arrow.FixedWidthTypes.DayTimeInterval,
+		flatbuf.IntervalUnitMONTH_DAY_NANO: arrow.FixedWidthTypes.MonthDayNanoInterval,
+	}
+	if dt, ok := units[data.Unit()]; ok {
+		return dt, nil
+	}
+	return nil, fmt.Errorf("arrow/ipc: Interval type with %d unit not implemented", data.Unit())
+}
+
+// durationFromFB converts a flatbuffers Duration unit into the matching
+// arrow duration type.
+func durationFromFB(data flatbuf.Duration) (arrow.DataType, error) {
+	units := map[flatbuf.TimeUnit]arrow.DataType{
+		flatbuf.TimeUnitSECOND:      arrow.FixedWidthTypes.Duration_s,
+		flatbuf.TimeUnitMILLISECOND: arrow.FixedWidthTypes.Duration_ms,
+		flatbuf.TimeUnitMICROSECOND: arrow.FixedWidthTypes.Duration_us,
+		flatbuf.TimeUnitNANOSECOND:  arrow.FixedWidthTypes.Duration_ns,
+	}
+	if dt, ok := units[data.Unit()]; ok {
+		return dt, nil
+	}
+	return nil, fmt.Errorf("arrow/ipc: Duration type with %d unit not implemented", data.Unit())
+}
+
+// customMetadataer abstracts any flatbuffers table exposing a
+// custom_metadata vector of key/value pairs (e.g. Schema, Field).
+type customMetadataer interface {
+	CustomMetadataLength() int
+	CustomMetadata(*flatbuf.KeyValue, int) bool
+}
+
+// metadataFromFB extracts the custom key/value metadata of md into an
+// arrow.Metadata value.
+func metadataFromFB(md customMetadataer) (arrow.Metadata, error) {
+	n := md.CustomMetadataLength()
+	keys := make([]string, n)
+	vals := make([]string, n)
+
+	for i := 0; i < n; i++ {
+		var kv flatbuf.KeyValue
+		if !md.CustomMetadata(&kv, i) {
+			return arrow.Metadata{}, fmt.Errorf("arrow/ipc: could not read key-value %d from flatbuffer", i)
+		}
+		keys[i] = string(kv.Key())
+		vals[i] = string(kv.Value())
+	}
+
+	return arrow.NewMetadata(keys, vals), nil
+}
+
+// metadataToFB serializes meta as a flatbuffers vector of KeyValue
+// tables opened by start, returning 0 when there is no metadata.
+func metadataToFB(b *flatbuffers.Builder, meta arrow.Metadata, start startVecFunc) flatbuffers.UOffsetT {
+	n := meta.Len()
+	if n == 0 {
+		return 0
+	}
+
+	keys, vals := meta.Keys(), meta.Values()
+	offsets := make([]flatbuffers.UOffsetT, n)
+	for i := 0; i < n; i++ {
+		k := b.CreateString(keys[i])
+		v := b.CreateString(vals[i])
+		flatbuf.KeyValueStart(b)
+		flatbuf.KeyValueAddKey(b, k)
+		flatbuf.KeyValueAddValue(b, v)
+		offsets[i] = flatbuf.KeyValueEnd(b)
+	}
+
+	// flatbuffers vectors are filled back-to-front.
+	start(b, n)
+	for i := n - 1; i >= 0; i-- {
+		b.PrependUOffsetT(offsets[i])
+	}
+	return b.EndVector(n)
+}
+
+// schemaFromFB converts a flatbuffers Schema into an *arrow.Schema,
+// recording any dictionary encodings in memo and preserving the
+// schema's endianness and custom metadata.
+func schemaFromFB(schema *flatbuf.Schema, memo *dictutils.Memo) (*arrow.Schema, error) {
+	n := schema.FieldsLength()
+	fields := make([]arrow.Field, n)
+	pos := dictutils.NewFieldPos()
+
+	for i := 0; i < n; i++ {
+		var fb flatbuf.Field
+		if !schema.Fields(&fb, i) {
+			return nil, fmt.Errorf("arrow/ipc: could not read field %d from schema", i)
+		}
+
+		f, err := fieldFromFB(&fb, pos.Child(int32(i)), memo)
+		if err != nil {
+			return nil, fmt.Errorf("arrow/ipc: could not convert field %d from flatbuf: %w", i, err)
+		}
+		fields[i] = f
+	}
+
+	md, err := metadataFromFB(schema)
+	if err != nil {
+		return nil, fmt.Errorf("arrow/ipc: could not convert schema metadata from flatbuf: %w", err)
+	}
+
+	return arrow.NewSchemaWithEndian(fields, &md, endian.Endianness(schema.Endianness())), nil
+}
+
+// schemaToFB serializes schema into the flatbuffers builder, returning
+// the offset of the finished Schema table.
+func schemaToFB(b *flatbuffers.Builder, schema *arrow.Schema, memo *dictutils.Mapper) flatbuffers.UOffsetT {
+	pos := dictutils.NewFieldPos()
+	offsets := make([]flatbuffers.UOffsetT, len(schema.Fields()))
+	for i, f := range schema.Fields() {
+		offsets[i] = fieldToFB(b, pos.Child(int32(i)), f, memo)
+	}
+
+	// fields vector: filled back-to-front per flatbuffers convention.
+	flatbuf.SchemaStartFieldsVector(b, len(offsets))
+	for i := len(offsets) - 1; i >= 0; i-- {
+		b.PrependUOffsetT(offsets[i])
+	}
+	fieldsFB := b.EndVector(len(offsets))
+
+	metaFB := metadataToFB(b, schema.Metadata(), flatbuf.SchemaStartCustomMetadataVector)
+
+	flatbuf.SchemaStart(b)
+	flatbuf.SchemaAddEndianness(b, flatbuf.Endianness(schema.Endianness()))
+	flatbuf.SchemaAddFields(b, fieldsFB)
+	flatbuf.SchemaAddCustomMetadata(b, metaFB)
+	return flatbuf.SchemaEnd(b)
+}
+
+// payloadFromSchema returns a slice of payloads corresponding to the given schema.
+// Callers of payloadFromSchema will need to call Release after use.
+func payloadFromSchema(schema *arrow.Schema, mem memory.Allocator, memo *dictutils.Mapper) payloads {
+	ps := make(payloads, 1)
+	ps[0].msg = MessageSchema
+	ps[0].meta = writeSchemaMessage(schema, mem, memo)
+
+	return ps
+}
+
+// writeFBBuilder copies the finished flatbuffer out of b into a new
+// memory.Buffer allocated from mem.
+func writeFBBuilder(b *flatbuffers.Builder, mem memory.Allocator) *memory.Buffer {
+	raw := b.FinishedBytes()
+	buf := memory.NewResizableBuffer(mem)
+	buf.Resize(len(raw))
+	copy(buf.Bytes(), raw)
+	return buf
+}
+
+// writeMessageFB wraps an already-serialized header (hdr, of kind hdrType)
+// in an IPC Message table, finishes the builder, and returns the message
+// bytes. bodyLen is the length of the message body that will follow on the
+// wire.
+func writeMessageFB(b *flatbuffers.Builder, mem memory.Allocator, hdrType flatbuf.MessageHeader, hdr flatbuffers.UOffsetT, bodyLen int64) *memory.Buffer {
+
+	flatbuf.MessageStart(b)
+	flatbuf.MessageAddVersion(b, flatbuf.MetadataVersion(currentMetadataVersion))
+	flatbuf.MessageAddHeaderType(b, hdrType)
+	flatbuf.MessageAddHeader(b, hdr)
+	flatbuf.MessageAddBodyLength(b, bodyLen)
+	msg := flatbuf.MessageEnd(b)
+	b.Finish(msg)
+
+	return writeFBBuilder(b, mem)
+}
+
+// writeSchemaMessage serializes schema as an IPC Schema message (which has
+// no body, hence bodyLen 0) and returns the message buffer.
+func writeSchemaMessage(schema *arrow.Schema, mem memory.Allocator, dict *dictutils.Mapper) *memory.Buffer {
+	b := flatbuffers.NewBuilder(1024)
+	schemaFB := schemaToFB(b, schema, dict)
+	return writeMessageFB(b, mem, flatbuf.MessageHeaderSchema, schemaFB, 0)
+}
+
+// writeFileFooter serializes the IPC file footer — the schema plus the
+// dictionary and record-batch block indexes — to w. The footer is what
+// enables random access in the Arrow IPC file format.
+func writeFileFooter(schema *arrow.Schema, dicts, recs []fileBlock, w io.Writer) error {
+	var (
+		b    = flatbuffers.NewBuilder(1024)
+		memo dictutils.Mapper
+	)
+	memo.ImportSchema(schema)
+
+	schemaFB := schemaToFB(b, schema, &memo)
+	dictsFB := fileBlocksToFB(b, dicts, flatbuf.FooterStartDictionariesVector)
+	recsFB := fileBlocksToFB(b, recs, flatbuf.FooterStartRecordBatchesVector)
+
+	flatbuf.FooterStart(b)
+	flatbuf.FooterAddVersion(b, flatbuf.MetadataVersion(currentMetadataVersion))
+	flatbuf.FooterAddSchema(b, schemaFB)
+	flatbuf.FooterAddDictionaries(b, dictsFB)
+	flatbuf.FooterAddRecordBatches(b, recsFB)
+	footer := flatbuf.FooterEnd(b)
+
+	b.Finish(footer)
+
+	_, err := w.Write(b.FinishedBytes())
+	return err
+}
+
+// writeRecordMessage serializes a RecordBatch header message for a record
+// with the given size (row count) and body length.
+func writeRecordMessage(mem memory.Allocator, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) *memory.Buffer {
+	b := flatbuffers.NewBuilder(0)
+	recFB := recordToFB(b, size, bodyLength, fields, meta, codec)
+	return writeMessageFB(b, mem, flatbuf.MessageHeaderRecordBatch, recFB, bodyLength)
+}
+
+// writeDictionaryMessage serializes a DictionaryBatch header message that
+// wraps a record batch carrying the dictionary values for id. isDelta marks
+// the batch as a delta (append) rather than a replacement.
+func writeDictionaryMessage(mem memory.Allocator, id int64, isDelta bool, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) *memory.Buffer {
+	b := flatbuffers.NewBuilder(0)
+	recFB := recordToFB(b, size, bodyLength, fields, meta, codec)
+
+	flatbuf.DictionaryBatchStart(b)
+	flatbuf.DictionaryBatchAddId(b, id)
+	flatbuf.DictionaryBatchAddData(b, recFB)
+	flatbuf.DictionaryBatchAddIsDelta(b, isDelta)
+	dictFB := flatbuf.DictionaryBatchEnd(b)
+	return writeMessageFB(b, mem, flatbuf.MessageHeaderDictionaryBatch, dictFB, bodyLength)
+}
+
+// recordToFB serializes a RecordBatch flatbuffer table: the row count, the
+// per-array field nodes, the buffer metadata and, when codec != -1, the
+// body-compression descriptor. NOTE(review): bodyLength is not used in this
+// function; it is kept for signature symmetry with the message writers.
+func recordToFB(b *flatbuffers.Builder, size, bodyLength int64, fields []fieldMetadata, meta []bufferMetadata, codec flatbuf.CompressionType) flatbuffers.UOffsetT {
+	fieldsFB := writeFieldNodes(b, fields, flatbuf.RecordBatchStartNodesVector)
+	metaFB := writeBuffers(b, meta, flatbuf.RecordBatchStartBuffersVector)
+	var bodyCompressFB flatbuffers.UOffsetT
+	// codec == -1 means no compression was configured.
+	if codec != -1 {
+		bodyCompressFB = writeBodyCompression(b, codec)
+	}
+
+	flatbuf.RecordBatchStart(b)
+	flatbuf.RecordBatchAddLength(b, size)
+	flatbuf.RecordBatchAddNodes(b, fieldsFB)
+	flatbuf.RecordBatchAddBuffers(b, metaFB)
+	if codec != -1 {
+		flatbuf.RecordBatchAddCompression(b, bodyCompressFB)
+	}
+
+	return flatbuf.RecordBatchEnd(b)
+}
+
+// writeFieldNodes serializes the per-array FieldNode structs (length and
+// null count) as an inline flatbuffer vector, prepended in reverse order as
+// flatbuffers requires.
+func writeFieldNodes(b *flatbuffers.Builder, fields []fieldMetadata, start startVecFunc) flatbuffers.UOffsetT {
+
+	start(b, len(fields))
+	for i := len(fields) - 1; i >= 0; i-- {
+		field := fields[i]
+		// field nodes carry no offset in the IPC format; a non-zero offset
+		// here indicates a programming error upstream.
+		if field.Offset != 0 {
+			panic(fmt.Errorf("arrow/ipc: field metadata for IPC must have offset 0"))
+		}
+		flatbuf.CreateFieldNode(b, field.Len, field.Nulls)
+	}
+
+	return b.EndVector(len(fields))
+}
+
+// writeBuffers serializes the body-buffer (offset, length) descriptors as an
+// inline flatbuffer vector, in reverse order.
+func writeBuffers(b *flatbuffers.Builder, buffers []bufferMetadata, start startVecFunc) flatbuffers.UOffsetT {
+	start(b, len(buffers))
+	for i := len(buffers) - 1; i >= 0; i-- {
+		buffer := buffers[i]
+		flatbuf.CreateBuffer(b, buffer.Offset, buffer.Len)
+	}
+	return b.EndVector(len(buffers))
+}
+
+// writeBodyCompression serializes the BodyCompression table describing the
+// buffer-level compression codec used for the message body.
+func writeBodyCompression(b *flatbuffers.Builder, codec flatbuf.CompressionType) flatbuffers.UOffsetT {
+	flatbuf.BodyCompressionStart(b)
+	flatbuf.BodyCompressionAddCodec(b, codec)
+	flatbuf.BodyCompressionAddMethod(b, flatbuf.BodyCompressionMethodBUFFER)
+	return flatbuf.BodyCompressionEnd(b)
+}
+
+// writeMessage writes the encapsulated message msg to w: a 4-byte
+// continuation marker, a 4-byte little-endian size prefix, the flatbuffer
+// bytes, then zero padding up to alignment. The returned size covers the
+// size prefix, the flatbuffer and the padding (see inline comment).
+func writeMessage(msg *memory.Buffer, alignment int32, w io.Writer) (int, error) {
+	var (
+		n   int
+		err error
+	)
+
+	// ARROW-3212: we do not make any assumption on whether the output stream is aligned or not.
+	paddedMsgLen := int32(msg.Len()) + 8
+	remainder := paddedMsgLen % alignment
+	if remainder != 0 {
+		paddedMsgLen += alignment - remainder
+	}
+
+	tmp := make([]byte, 4)
+
+	// write continuation indicator, to address 8-byte alignment requirement from FlatBuffers.
+	binary.LittleEndian.PutUint32(tmp, kIPCContToken)
+	_, err = w.Write(tmp)
+	if err != nil {
+		return 0, fmt.Errorf("arrow/ipc: could not write continuation bit indicator: %w", err)
+	}
+
+	// the returned message size includes the length prefix, the flatbuffer, + padding
+	n = int(paddedMsgLen)
+
+	// write the flatbuffer size prefix, including padding
+	sizeFB := paddedMsgLen - 8
+	binary.LittleEndian.PutUint32(tmp, uint32(sizeFB))
+	_, err = w.Write(tmp)
+	if err != nil {
+		return n, fmt.Errorf("arrow/ipc: could not write message flatbuffer size prefix: %w", err)
+	}
+
+	// write the flatbuffer
+	_, err = w.Write(msg.Bytes())
+	if err != nil {
+		return n, fmt.Errorf("arrow/ipc: could not write message flatbuffer: %w", err)
+	}
+
+	// write any padding
+	padding := paddedMsgLen - int32(msg.Len()) - 8
+	if padding > 0 {
+		_, err = w.Write(paddingBytes[:padding])
+		if err != nil {
+			return n, fmt.Errorf("arrow/ipc: could not write message padding bytes: %w", err)
+		}
+	}
+
+	return n, err
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go
new file mode 100644
index 000000000..1f684c1f6
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go
@@ -0,0 +1,285 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/array"
+ "github.com/apache/arrow/go/v14/arrow/endian"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/internal/dictutils"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// Reader reads records from an io.Reader.
+// Reader expects a schema (plus any dictionaries) as the first messages
+// in the stream, followed by records.
+type Reader struct {
+	r      MessageReader // source of IPC messages
+	schema *arrow.Schema // decoded schema, possibly endian-normalized
+
+	refCount int64        // atomic reference count
+	rec      arrow.Record // record produced by the most recent Next/Read
+	err      error        // sticky error; surfaced via Err
+
+	// types dictTypeMap
+	memo               dictutils.Memo // dictionaries read so far, keyed by id
+	readInitialDicts   bool           // whether the leading dictionary batches were consumed
+	done               bool           // no further records can be read
+	swapEndianness     bool           // record data must be byte-swapped to native endianness
+	ensureNativeEndian bool
+	expectedSchema     *arrow.Schema // schema to validate against, when provided
+
+	mem memory.Allocator
+}
+
+// NewReaderFromMessageReader allows constructing a new reader object with the
+// provided MessageReader allowing injection of reading messages other than
+// by simple streaming bytes such as Arrow Flight which receives a protobuf message
+func NewReaderFromMessageReader(r MessageReader, opts ...Option) (reader *Reader, err error) {
+	// panics raised by malformed input are converted into errors.
+	defer func() {
+		if pErr := recover(); pErr != nil {
+			err = fmt.Errorf("arrow/ipc: unknown error while reading: %v", pErr)
+		}
+	}()
+	cfg := newConfig()
+	for _, opt := range opts {
+		opt(cfg)
+	}
+
+	rr := &Reader{
+		r:        r,
+		refCount: 1,
+		// types: make(dictTypeMap),
+		memo:               dictutils.NewMemo(),
+		mem:                cfg.alloc,
+		ensureNativeEndian: cfg.ensureNativeEndian,
+		expectedSchema:     cfg.schema,
+	}
+
+	// unless delayed schema reading was requested, read (and validate)
+	// the schema message up front.
+	if !cfg.noAutoSchema {
+		if err := rr.readSchema(cfg.schema); err != nil {
+			return nil, err
+		}
+	}
+
+	return rr, nil
+}
+
+// NewReader returns a reader that reads records from an input stream.
+func NewReader(r io.Reader, opts ...Option) (*Reader, error) {
+	msgReader := NewMessageReader(r, opts...)
+	return NewReaderFromMessageReader(msgReader, opts...)
+}
+
+// Err returns the last error encountered during the iteration over the
+// underlying stream.
+func (r *Reader) Err() error { return r.err }
+
+// Schema returns the schema of the records being read, lazily reading it
+// from the stream on first use. On failure the reader is marked done, the
+// error is recorded for Err, and nil is returned.
+func (r *Reader) Schema() *arrow.Schema {
+	if r.schema == nil {
+		if err := r.readSchema(r.expectedSchema); err != nil {
+			r.err = fmt.Errorf("arrow/ipc: could not read schema from stream: %w", err)
+			r.done = true
+		}
+	}
+	return r.schema
+}
+
+// readSchema reads and decodes the schema message from the stream into
+// r.schema, registering dictionary fields in r.memo. When schema is non-nil
+// the decoded schema must match it. When ensureNativeEndian is set and the
+// stream schema is foreign-endian, the reader is flagged to byte-swap
+// record data and the schema is rewritten as native-endian.
+func (r *Reader) readSchema(schema *arrow.Schema) error {
+	msg, err := r.r.Message()
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: could not read message schema: %w", err)
+	}
+
+	// the schema message must come first in the stream.
+	if msg.Type() != MessageSchema {
+		return fmt.Errorf("arrow/ipc: invalid message type (got=%v, want=%v)", msg.Type(), MessageSchema)
+	}
+
+	// FIXME(sbinet) refactor msg-header handling.
+	var schemaFB flatbuf.Schema
+	initFB(&schemaFB, msg.msg.Header)
+
+	r.schema, err = schemaFromFB(&schemaFB, &r.memo)
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: could not decode schema from message schema: %w", err)
+	}
+
+	// check the provided schema match the one read from stream.
+	if schema != nil && !schema.Equal(r.schema) {
+		return errInconsistentSchema
+	}
+
+	if r.ensureNativeEndian && !r.schema.IsNativeEndian() {
+		r.swapEndianness = true
+		r.schema = r.schema.WithEndianness(endian.NativeEndian)
+	}
+
+	return nil
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (r *Reader) Retain() {
+	atomic.AddInt64(&r.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (r *Reader) Release() {
+	debug.Assert(atomic.LoadInt64(&r.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&r.refCount, -1) == 0 {
+		// drop the current record, the message reader, and all cached
+		// dictionaries.
+		if r.rec != nil {
+			r.rec.Release()
+			r.rec = nil
+		}
+		if r.r != nil {
+			r.r.Release()
+			r.r = nil
+		}
+		r.memo.Clear()
+	}
+}
+
+// Next returns whether a Record could be extracted from the underlying stream.
+func (r *Reader) Next() bool {
+	// release the record handed out by the previous iteration.
+	if r.rec != nil {
+		r.rec.Release()
+		r.rec = nil
+	}
+
+	if r.done || r.err != nil {
+		return false
+	}
+	return r.next()
+}
+
+// getInitialDicts reads the dictionary batches that must precede the first
+// record batch in the stream. It returns false — setting r.err and r.done
+// as appropriate — when the expected dictionaries could not be read.
+func (r *Reader) getInitialDicts() bool {
+	var msg *Message
+	// we have to get all dictionaries before reconstructing the first
+	// record. subsequent deltas and replacements modify the memo
+	numDicts := r.memo.Mapper.NumDicts()
+	// there should be numDicts dictionary messages
+	for i := 0; i < numDicts; i++ {
+		msg, r.err = r.r.Message()
+		if r.err != nil {
+			r.done = true
+			if r.err == io.EOF {
+				if i == 0 {
+					// an empty stream with no dictionaries is not an error.
+					r.err = nil
+				} else {
+					r.err = fmt.Errorf("arrow/ipc: IPC stream ended without reading the expected (%d) dictionaries", numDicts)
+				}
+			}
+			return false
+		}
+
+		if msg.Type() != MessageDictionaryBatch {
+			// BUGFIX: previously the error was recorded but execution fell
+			// through and tried to decode the message as a dictionary anyway.
+			r.err = fmt.Errorf("arrow/ipc: IPC stream did not have the expected (%d) dictionaries at the start of the stream", numDicts)
+			r.done = true
+			return false
+		}
+		if _, err := readDictionary(&r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem); err != nil {
+			r.done = true
+			r.err = err
+			return false
+		}
+	}
+	r.readInitialDicts = true
+	return true
+}
+
+// next advances to the next record batch, decoding any interleaved
+// dictionary delta/replacement batches along the way. It returns false at
+// end-of-stream or on error; panics are recovered into r.err.
+func (r *Reader) next() bool {
+	defer func() {
+		if pErr := recover(); pErr != nil {
+			r.err = fmt.Errorf("arrow/ipc: unknown error while reading: %v", pErr)
+		}
+	}()
+	if r.schema == nil {
+		if err := r.readSchema(r.expectedSchema); err != nil {
+			r.err = fmt.Errorf("arrow/ipc: could not read schema from stream: %w", err)
+			r.done = true
+			return false
+		}
+	}
+
+	if !r.readInitialDicts && !r.getInitialDicts() {
+		return false
+	}
+
+	var msg *Message
+	msg, r.err = r.r.Message()
+
+	// consume dictionary deltas/replacements preceding the record batch.
+	for msg != nil && msg.Type() == MessageDictionaryBatch {
+		if _, r.err = readDictionary(&r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem); r.err != nil {
+			r.done = true
+			return false
+		}
+		msg, r.err = r.r.Message()
+	}
+	if r.err != nil {
+		r.done = true
+		// a clean end of stream is not an error.
+		if errors.Is(r.err, io.EOF) {
+			r.err = nil
+		}
+		return false
+	}
+
+	// BUGFIX: the error message was missing its closing parenthesis.
+	if got, want := msg.Type(), MessageRecordBatch; got != want {
+		r.err = fmt.Errorf("arrow/ipc: invalid message type (got=%v, want=%v)", got, want)
+		return false
+	}
+
+	r.rec = newRecord(r.schema, &r.memo, msg.meta, bytes.NewReader(msg.body.Bytes()), r.swapEndianness, r.mem)
+	return true
+}
+
+// Record returns the current record that has been extracted from the
+// underlying stream.
+// It is valid until the next call to Next.
+func (r *Reader) Record() arrow.Record {
+	return r.rec
+}
+
+// Read reads the current record from the underlying stream and an error, if any.
+// When the Reader reaches the end of the underlying stream, it returns (nil, io.EOF).
+func (r *Reader) Read() (arrow.Record, error) {
+	// release the record handed out by the previous call.
+	if r.rec != nil {
+		r.rec.Release()
+		r.rec = nil
+	}
+
+	if !r.next() {
+		// done with no recorded error means a clean end of stream.
+		if r.done && r.err == nil {
+			return nil, io.EOF
+		}
+		return nil, r.err
+	}
+
+	return r.rec, nil
+}
+
+var (
+ _ array.RecordReader = (*Reader)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go
new file mode 100644
index 000000000..a97f47ef4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go
@@ -0,0 +1,1004 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ipc
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sync"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/array"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/arrow/internal"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+ "github.com/apache/arrow/go/v14/arrow/internal/dictutils"
+ "github.com/apache/arrow/go/v14/arrow/internal/flatbuf"
+ "github.com/apache/arrow/go/v14/arrow/memory"
+)
+
+// swriter is the stream PayloadWriter used by NewWriter: it writes IPC
+// payloads sequentially to an underlying io.Writer, tracking the current
+// stream position in pos.
+type swriter struct {
+	w   io.Writer
+	pos int64
+}
+
+// Start is a no-op: the stream format requires no preamble.
+func (w *swriter) Start() error { return nil }
+
+// Close writes the end-of-stream marker.
+func (w *swriter) Close() error {
+	_, err := w.Write(kEOS[:])
+	return err
+}
+
+// WritePayload writes a single encapsulated IPC payload to the stream.
+func (w *swriter) WritePayload(p Payload) error {
+	_, err := writeIPCPayload(w, p)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Write forwards to the underlying writer and advances pos by the number
+// of bytes actually written.
+func (w *swriter) Write(p []byte) (int, error) {
+	n, err := w.w.Write(p)
+	w.pos += int64(n)
+	return n, err
+}
+
+// hasNestedDict reports whether data, or any array nested inside it, is
+// dictionary-encoded.
+func hasNestedDict(data arrow.ArrayData) bool {
+	if data.DataType().ID() == arrow.DICTIONARY {
+		return true
+	}
+	children := data.Children()
+	for i := range children {
+		if hasNestedDict(children[i]) {
+			return true
+		}
+	}
+	return false
+}
+
+// Writer is an Arrow stream writer.
+type Writer struct {
+	w io.Writer
+
+	mem memory.Allocator
+	pw  PayloadWriter // nil once Close has completed
+
+	started         bool          // whether the schema message has been written
+	schema          *arrow.Schema // schema every written record must match
+	mapper          dictutils.Mapper
+	codec           flatbuf.CompressionType
+	compressNP      int // compression goroutines; <= 1 compresses serially
+	minSpaceSavings *float64
+
+	// map of the last written dictionaries by id
+	// so we can avoid writing the same dictionary over and over
+	lastWrittenDicts map[int64]arrow.Array
+	emitDictDeltas   bool
+}
+
+// NewWriterWithPayloadWriter constructs a writer with the provided payload writer
+// instead of the default stream payload writer. This makes the writer more
+// reusable such as by the Arrow Flight writer.
+func NewWriterWithPayloadWriter(pw PayloadWriter, opts ...Option) *Writer {
+	cfg := newConfig(opts...)
+	// note: every compression-related option from cfg is propagated here.
+	return &Writer{
+		mem:             cfg.alloc,
+		pw:              pw,
+		schema:          cfg.schema,
+		codec:           cfg.codec,
+		compressNP:      cfg.compressNP,
+		minSpaceSavings: cfg.minSpaceSavings,
+		emitDictDeltas:  cfg.emitDictDeltas,
+	}
+}
+
+// NewWriter returns a writer that writes records to the provided output stream.
+func NewWriter(w io.Writer, opts ...Option) *Writer {
+	cfg := newConfig(opts...)
+	return &Writer{
+		w:      w,
+		mem:    cfg.alloc,
+		pw:     &swriter{w: w},
+		schema: cfg.schema,
+		codec:  cfg.codec,
+		// BUGFIX: propagate the compression concurrency and minimum space
+		// savings options so stream writers honor them the same way
+		// NewWriterWithPayloadWriter does; previously they were dropped.
+		compressNP:      cfg.compressNP,
+		minSpaceSavings: cfg.minSpaceSavings,
+		emitDictDeltas:  cfg.emitDictDeltas,
+	}
+}
+
+// Close writes the stream terminator and releases the dictionaries the
+// writer retained. If the writer was never started, the schema message is
+// emitted first so the resulting stream is well-formed. Subsequent calls
+// are no-ops.
+func (w *Writer) Close() error {
+	if !w.started {
+		err := w.start()
+		if err != nil {
+			return err
+		}
+	}
+
+	// already closed.
+	if w.pw == nil {
+		return nil
+	}
+
+	err := w.pw.Close()
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: could not close payload writer: %w", err)
+	}
+	w.pw = nil
+
+	// drop the references taken in writeDictionaryPayloads.
+	for _, d := range w.lastWrittenDicts {
+		d.Release()
+	}
+
+	return nil
+}
+
+// Write writes rec to the stream, first emitting any dictionary batches the
+// record requires. rec's schema must match the writer's schema. Panics
+// raised while encoding are recovered and returned as errors.
+func (w *Writer) Write(rec arrow.Record) (err error) {
+	defer func() {
+		if pErr := recover(); pErr != nil {
+			err = fmt.Errorf("arrow/ipc: unknown error while writing: %v", pErr)
+		}
+	}()
+
+	// lazily emit the schema message on first use.
+	if !w.started {
+		err := w.start()
+		if err != nil {
+			return err
+		}
+	}
+
+	schema := rec.Schema()
+	if schema == nil || !schema.Equal(w.schema) {
+		return errInconsistentSchema
+	}
+
+	const allow64b = true
+	var (
+		data = Payload{msg: MessageRecordBatch}
+		enc  = newRecordEncoder(w.mem, 0, kMaxNestingDepth, allow64b, w.codec, w.compressNP, w.minSpaceSavings)
+	)
+	defer data.Release()
+
+	// dictionaries must precede the record batch that references them.
+	err = writeDictionaryPayloads(w.mem, rec, false, w.emitDictDeltas, &w.mapper, w.lastWrittenDicts, w.pw, enc)
+	if err != nil {
+		return fmt.Errorf("arrow/ipc: failure writing dictionary batches: %w", err)
+	}
+
+	enc.reset()
+	if err := enc.Encode(&data, rec); err != nil {
+		return fmt.Errorf("arrow/ipc: could not encode record to payload: %w", err)
+	}
+
+	return w.pw.WritePayload(data)
+}
+
+// writeDictionaryPayloads encodes and writes the dictionary batches needed
+// by batch: unchanged dictionaries are skipped, deltas are emitted when
+// enabled and the old dictionary is a prefix of the new one, and
+// replacements are rejected for the IPC file format. lastWrittenDicts is
+// updated to hold a retained reference to every dictionary written.
+func writeDictionaryPayloads(mem memory.Allocator, batch arrow.Record, isFileFormat bool, emitDictDeltas bool, mapper *dictutils.Mapper, lastWrittenDicts map[int64]arrow.Array, pw PayloadWriter, encoder *recordEncoder) error {
+	dictionaries, err := dictutils.CollectDictionaries(batch, mapper)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		for _, d := range dictionaries {
+			d.Dict.Release()
+		}
+	}()
+
+	eqopt := array.WithNaNsEqual(true)
+	for _, pair := range dictionaries {
+		encoder.reset()
+		var (
+			deltaStart int64
+			enc        = dictEncoder{encoder}
+		)
+		lastDict, exists := lastWrittenDicts[pair.ID]
+		if exists {
+			if lastDict.Data() == pair.Dict.Data() {
+				// identical underlying data: nothing new to write.
+				continue
+			}
+			newLen, lastLen := pair.Dict.Len(), lastDict.Len()
+			if lastLen == newLen && array.ApproxEqual(lastDict, pair.Dict, eqopt) {
+				// same dictionary by value
+				// might cost CPU, but required for IPC file format
+				continue
+			}
+			if isFileFormat {
+				return errors.New("arrow/ipc: Dictionary replacement detected when writing IPC file format. Arrow IPC File only supports single dictionary per field")
+			}
+
+			// emit only the appended tail as a delta when the previously
+			// written dictionary is a strict prefix of the new one.
+			if newLen > lastLen &&
+				emitDictDeltas &&
+				!hasNestedDict(pair.Dict.Data()) &&
+				(array.SliceApproxEqual(lastDict, 0, int64(lastLen), pair.Dict, 0, int64(lastLen), eqopt)) {
+				deltaStart = int64(lastLen)
+			}
+		}
+
+		// NOTE(review): these defers run at function exit, so payloads and
+		// delta slices stay alive for the duration of the whole loop.
+		var data = Payload{msg: MessageDictionaryBatch}
+		defer data.Release()
+
+		dict := pair.Dict
+		if deltaStart > 0 {
+			dict = array.NewSlice(dict, deltaStart, int64(dict.Len()))
+			defer dict.Release()
+		}
+		if err := enc.Encode(&data, pair.ID, deltaStart > 0, dict); err != nil {
+			return err
+		}
+
+		if err := pw.WritePayload(data); err != nil {
+			return err
+		}
+
+		// take ownership of the newly written dictionary, dropping the
+		// reference held on the previous one.
+		lastWrittenDicts[pair.ID] = pair.Dict
+		if lastDict != nil {
+			lastDict.Release()
+		}
+		pair.Dict.Retain()
+	}
+	return nil
+}
+
+// start marks the writer as started, imports the schema's dictionary
+// fields, and emits the schema payload(s) that must precede any records.
+func (w *Writer) start() error {
+	w.started = true
+
+	w.mapper.ImportSchema(w.schema)
+	w.lastWrittenDicts = make(map[int64]arrow.Array)
+
+	// write out schema payloads
+	ps := payloadFromSchema(w.schema, w.mem, &w.mapper)
+	defer ps.Release()
+
+	for _, data := range ps {
+		if err := w.pw.WritePayload(data); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// dictEncoder wraps recordEncoder to produce DictionaryBatch payloads
+// instead of RecordBatch payloads.
+type dictEncoder struct {
+	*recordEncoder
+}
+
+// encodeMetadata writes the DictionaryBatch message header into p.meta.
+func (d *dictEncoder) encodeMetadata(p *Payload, isDelta bool, id, nrows int64) error {
+	p.meta = writeDictionaryMessage(d.mem, id, isDelta, nrows, p.size, d.fields, d.meta, d.codec)
+	return nil
+}
+
+// Encode encodes dict — the dictionary values for id — into p by wrapping
+// it in a single-column record batch named "dictionary". isDelta marks the
+// payload as a delta (append) dictionary batch.
+func (d *dictEncoder) Encode(p *Payload, id int64, isDelta bool, dict arrow.Array) error {
+	// dictionaries are always encoded from buffer offset 0.
+	d.start = 0
+	defer func() {
+		d.start = 0
+	}()
+
+	schema := arrow.NewSchema([]arrow.Field{{Name: "dictionary", Type: dict.DataType(), Nullable: true}}, nil)
+	batch := array.NewRecord(schema, []arrow.Array{dict}, int64(dict.Len()))
+	defer batch.Release()
+	if err := d.encode(p, batch); err != nil {
+		return err
+	}
+
+	return d.encodeMetadata(p, isDelta, id, batch.NumRows())
+}
+
+// recordEncoder lowers a record batch into an IPC payload by walking its
+// arrays depth-first, collecting field nodes, buffer metadata and body
+// buffers.
+type recordEncoder struct {
+	mem memory.Allocator
+
+	fields []fieldMetadata  // one (length, null count) entry per visited array
+	meta   []bufferMetadata // offset/length of each body buffer
+
+	depth    int64 // remaining allowed nesting depth
+	start    int64 // base offset for buffer positions
+	allow64b bool  // whether arrays longer than math.MaxInt32 are allowed
+	codec    flatbuf.CompressionType
+	compressNP      int // compression goroutines; <= 1 compresses serially
+	minSpaceSavings *float64
+}
+
+// newRecordEncoder constructs a recordEncoder with the given starting
+// buffer offset, maximum nesting depth and compression configuration.
+func newRecordEncoder(mem memory.Allocator, startOffset, maxDepth int64, allow64b bool, codec flatbuf.CompressionType, compressNP int, minSpaceSavings *float64) *recordEncoder {
+	return &recordEncoder{
+		mem:             mem,
+		start:           startOffset,
+		depth:           maxDepth,
+		allow64b:        allow64b,
+		codec:           codec,
+		compressNP:      compressNP,
+		minSpaceSavings: minSpaceSavings,
+	}
+}
+
+// shouldCompress reports whether a compressed buffer should be kept: always
+// when no minimum space savings is configured, otherwise only when the
+// achieved savings ratio meets the configured minimum.
+func (w *recordEncoder) shouldCompress(uncompressed, compressed int) bool {
+	debug.Assert(uncompressed > 0, "uncompressed size is 0")
+	if w.minSpaceSavings == nil {
+		return true
+	}
+
+	savings := 1.0 - float64(compressed)/float64(uncompressed)
+	return savings >= *w.minSpaceSavings
+}
+
+// reset prepares the encoder for encoding a new batch.
+func (w *recordEncoder) reset() {
+	w.start = 0
+	w.fields = make([]fieldMetadata, 0)
+}
+
+// compressBodyBuffers compresses each of p's body buffers in place with the
+// configured codec, prefixing each buffer with its uncompressed length as a
+// little-endian int64 (or -1 when the data is kept uncompressed because the
+// configured minimum space savings was not met). Compression runs serially
+// when compressNP <= 1, otherwise on compressNP worker goroutines.
+func (w *recordEncoder) compressBodyBuffers(p *Payload) error {
+	compress := func(idx int, codec compressor) error {
+		if p.body[idx] == nil || p.body[idx].Len() == 0 {
+			return nil
+		}
+
+		buf := memory.NewResizableBuffer(w.mem)
+		buf.Reserve(codec.MaxCompressedLen(p.body[idx].Len()) + arrow.Int64SizeBytes)
+
+		// leading int64: the uncompressed length of this buffer.
+		binary.LittleEndian.PutUint64(buf.Buf(), uint64(p.body[idx].Len()))
+		bw := &bufferWriter{buf: buf, pos: arrow.Int64SizeBytes}
+		codec.Reset(bw)
+
+		n, err := codec.Write(p.body[idx].Bytes())
+		if err != nil {
+			return err
+		}
+		if err := codec.Close(); err != nil {
+			return err
+		}
+
+		finalLen := bw.pos
+		compressedLen := bw.pos - arrow.Int64SizeBytes
+		if !w.shouldCompress(n, compressedLen) {
+			n = copy(buf.Buf()[arrow.Int64SizeBytes:], p.body[idx].Bytes())
+			// size of -1 indicates to the reader that the body
+			// doesn't need to be decompressed
+			var noprefix int64 = -1
+			binary.LittleEndian.PutUint64(buf.Buf(), uint64(noprefix))
+			finalLen = n + arrow.Int64SizeBytes
+		}
+		bw.buf.Resize(finalLen)
+		p.body[idx].Release()
+		p.body[idx] = buf
+		return nil
+	}
+
+	if w.compressNP <= 1 {
+		codec := getCompressor(w.codec)
+		for idx := range p.body {
+			if err := compress(idx, codec); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	var (
+		wg sync.WaitGroup
+		ch = make(chan int)
+		// BUGFIX: errch must be buffered. With an unbuffered channel a
+		// failing worker blocked forever on the send (the only receive
+		// happens after wg.Wait, which in turn waited on that worker),
+		// deadlocking the whole call on any compression error.
+		errch       = make(chan error, w.compressNP)
+		ctx, cancel = context.WithCancel(context.Background())
+	)
+	defer cancel()
+
+	for i := 0; i < w.compressNP; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			codec := getCompressor(w.codec)
+			for {
+				select {
+				case idx, ok := <-ch:
+					if !ok {
+						// we're done, channel is closed!
+						return
+					}
+
+					if err := compress(idx, codec); err != nil {
+						errch <- err
+						cancel()
+						return
+					}
+				case <-ctx.Done():
+					// cancelled, return early
+					return
+				}
+			}
+		}()
+	}
+
+	// BUGFIX: guard the send with ctx.Done; if every worker exited after a
+	// failure, an unconditional send on ch would block forever.
+feed:
+	for idx := range p.body {
+		select {
+		case ch <- idx:
+		case <-ctx.Done():
+			break feed
+		}
+	}
+
+	close(ch)
+	wg.Wait()
+	close(errch)
+
+	// first reported error, or nil when the buffered channel is empty.
+	return <-errch
+}
+
+// encode performs a depth-first traversal of rec, populating p's body
+// buffers and the encoder's field/buffer metadata, and compresses the body
+// when a codec is configured.
+func (w *recordEncoder) encode(p *Payload, rec arrow.Record) error {
+	// perform depth-first traversal of the row-batch
+	for i, col := range rec.Columns() {
+		err := w.visit(p, col)
+		if err != nil {
+			return fmt.Errorf("arrow/ipc: could not encode column %d (%q): %w", i, rec.ColumnName(i), err)
+		}
+	}
+
+	if w.codec != -1 {
+		if w.minSpaceSavings != nil {
+			pct := *w.minSpaceSavings
+			if pct < 0 || pct > 1 {
+				p.Release()
+				return fmt.Errorf("%w: minSpaceSavings not in range [0,1]. Provided %.05f",
+					arrow.ErrInvalid, pct)
+			}
+		}
+		// BUGFIX: the error returned by compressBodyBuffers was previously
+		// discarded, silently producing a corrupt payload on failure.
+		if err := w.compressBodyBuffers(p); err != nil {
+			return err
+		}
+	}
+
+	// position for the start of a buffer relative to the passed frame of reference.
+	// may be 0 or some other position in an address space.
+	offset := w.start
+	w.meta = make([]bufferMetadata, len(p.body))
+
+	// construct the metadata for the record batch header
+	for i, buf := range p.body {
+		var (
+			size    int64
+			padding int64
+		)
+		// the buffer might be null if we are handling zero row lengths.
+		if buf != nil {
+			size = int64(buf.Len())
+			padding = bitutil.CeilByte64(size) - size
+		}
+		w.meta[i] = bufferMetadata{
+			Offset: offset,
+			// even though we add padding, we need the Len to be correct
+			// so that decompressing works properly.
+			Len: size,
+		}
+		offset += size + padding
+	}
+
+	p.size = offset - w.start
+	if !bitutil.IsMultipleOf8(p.size) {
+		panic("not aligned")
+	}
+
+	return nil
+}
+
+func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error {
+ if w.depth <= 0 {
+ return errMaxRecursion
+ }
+
+ if !w.allow64b && arr.Len() > math.MaxInt32 {
+ return errBigArray
+ }
+
+ if arr.DataType().ID() == arrow.EXTENSION {
+ arr := arr.(array.ExtensionArray)
+ err := w.visit(p, arr.Storage())
+ if err != nil {
+ return fmt.Errorf("failed visiting storage of for array %T: %w", arr, err)
+ }
+ return nil
+ }
+
+ if arr.DataType().ID() == arrow.DICTIONARY {
+ arr := arr.(*array.Dictionary)
+ return w.visit(p, arr.Indices())
+ }
+
+ // add all common elements
+ w.fields = append(w.fields, fieldMetadata{
+ Len: int64(arr.Len()),
+ Nulls: int64(arr.NullN()),
+ Offset: 0,
+ })
+
+ if arr.DataType().ID() == arrow.NULL {
+ return nil
+ }
+
+ if internal.HasValidityBitmap(arr.DataType().ID(), flatbuf.MetadataVersion(currentMetadataVersion)) {
+ switch arr.NullN() {
+ case 0:
+ // there are no null values, drop the null bitmap
+ p.body = append(p.body, nil)
+ default:
+ data := arr.Data()
+ var bitmap *memory.Buffer
+ if data.NullN() == data.Len() {
+ // every value is null, just use a new zero-initialized bitmap to avoid the expense of copying
+ bitmap = memory.NewResizableBuffer(w.mem)
+ minLength := paddedLength(bitutil.BytesForBits(int64(data.Len())), kArrowAlignment)
+ bitmap.Resize(int(minLength))
+ } else {
+ // otherwise truncate and copy the bits
+ bitmap = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[0])
+ }
+ p.body = append(p.body, bitmap)
+ }
+ }
+
+ switch dtype := arr.DataType().(type) {
+ case *arrow.NullType:
+ // ok. NullArrays are completely empty.
+
+ case *arrow.BooleanType:
+ var (
+ data = arr.Data()
+ bitm *memory.Buffer
+ )
+
+ if data.Len() != 0 {
+ bitm = newTruncatedBitmap(w.mem, int64(data.Offset()), int64(data.Len()), data.Buffers()[1])
+ }
+ p.body = append(p.body, bitm)
+
+ case arrow.FixedWidthDataType:
+ data := arr.Data()
+ values := data.Buffers()[1]
+ arrLen := int64(arr.Len())
+ typeWidth := int64(dtype.BitWidth() / 8)
+ minLength := paddedLength(arrLen*typeWidth, kArrowAlignment)
+
+ switch {
+ case needTruncate(int64(data.Offset()), values, minLength):
+ // non-zero offset: slice the buffer
+ offset := int64(data.Offset()) * typeWidth
+ // send padding if available
+ len := minI64(bitutil.CeilByte64(arrLen*typeWidth), int64(values.Len())-offset)
+ values = memory.NewBufferBytes(values.Bytes()[offset : offset+len])
+ default:
+ if values != nil {
+ values.Retain()
+ }
+ }
+ p.body = append(p.body, values)
+
+ case *arrow.BinaryType, *arrow.LargeBinaryType, *arrow.StringType, *arrow.LargeStringType:
+ arr := arr.(array.BinaryLike)
+ voffsets := w.getZeroBasedValueOffsets(arr)
+ data := arr.Data()
+ values := data.Buffers()[2]
+
+ var totalDataBytes int64
+ if voffsets != nil {
+ totalDataBytes = int64(len(arr.ValueBytes()))
+ }
+
+ switch {
+ case needTruncate(int64(data.Offset()), values, totalDataBytes):
+ // slice data buffer to include the range we need now.
+ var (
+ beg = arr.ValueOffset64(0)
+ len = minI64(paddedLength(totalDataBytes, kArrowAlignment), int64(totalDataBytes))
+ )
+ values = memory.NewBufferBytes(data.Buffers()[2].Bytes()[beg : beg+len])
+ default:
+ if values != nil {
+ values.Retain()
+ }
+ }
+ p.body = append(p.body, voffsets)
+ p.body = append(p.body, values)
+
+ case *arrow.StructType:
+ w.depth--
+ arr := arr.(*array.Struct)
+ for i := 0; i < arr.NumField(); i++ {
+ err := w.visit(p, arr.Field(i))
+ if err != nil {
+ return fmt.Errorf("could not visit field %d of struct-array: %w", i, err)
+ }
+ }
+ w.depth++
+
+ case *arrow.SparseUnionType:
+ offset, length := arr.Data().Offset(), arr.Len()
+ arr := arr.(*array.SparseUnion)
+ typeCodes := getTruncatedBuffer(int64(offset), int64(length), int32(unsafe.Sizeof(arrow.UnionTypeCode(0))), arr.TypeCodes())
+ p.body = append(p.body, typeCodes)
+
+ w.depth--
+ for i := 0; i < arr.NumFields(); i++ {
+ err := w.visit(p, arr.Field(i))
+ if err != nil {
+ return fmt.Errorf("could not visit field %d of sparse union array: %w", i, err)
+ }
+ }
+ w.depth++
+ case *arrow.DenseUnionType:
+ offset, length := arr.Data().Offset(), arr.Len()
+ arr := arr.(*array.DenseUnion)
+ typeCodes := getTruncatedBuffer(int64(offset), int64(length), int32(unsafe.Sizeof(arrow.UnionTypeCode(0))), arr.TypeCodes())
+ p.body = append(p.body, typeCodes)
+
+ w.depth--
+ dt := arr.UnionType()
+
+ // union type codes are not necessarily 0-indexed
+ maxCode := dt.MaxTypeCode()
+
+ // allocate an array of child offsets. Set all to -1 to indicate we
+ // haven't observed a first occurrence of a particular child yet
+ offsets := make([]int32, maxCode+1)
+ lengths := make([]int32, maxCode+1)
+ offsets[0], lengths[0] = -1, 0
+ for i := 1; i < len(offsets); i *= 2 {
+ copy(offsets[i:], offsets[:i])
+ copy(lengths[i:], lengths[:i])
+ }
+
+ var valueOffsets *memory.Buffer
+ if offset != 0 {
+ valueOffsets = w.rebaseDenseUnionValueOffsets(arr, offsets, lengths)
+ } else {
+ valueOffsets = getTruncatedBuffer(int64(offset), int64(length), int32(arrow.Int32SizeBytes), arr.ValueOffsets())
+ }
+ p.body = append(p.body, valueOffsets)
+
+ // visit children and slice accordingly
+ for i := range dt.Fields() {
+ child := arr.Field(i)
+ // for sliced unions it's tricky to know how much to truncate
+ // the children. For now we'll truncate the children to be
+ // no longer than the parent union.
+
+ if offset != 0 {
+ code := dt.TypeCodes()[i]
+ childOffset := offsets[code]
+ childLen := lengths[code]
+
+ if childOffset > 0 {
+ child = array.NewSlice(child, int64(childOffset), int64(childOffset+childLen))
+ defer child.Release()
+ } else if childLen < int32(child.Len()) {
+ child = array.NewSlice(child, 0, int64(childLen))
+ defer child.Release()
+ }
+ }
+ if err := w.visit(p, child); err != nil {
+ return fmt.Errorf("could not visit field %d of dense union array: %w", i, err)
+ }
+ }
+ w.depth++
+ case *arrow.MapType, *arrow.ListType, *arrow.LargeListType:
+ arr := arr.(array.ListLike)
+ voffsets := w.getZeroBasedValueOffsets(arr)
+ p.body = append(p.body, voffsets)
+
+ w.depth--
+ var (
+ values = arr.ListValues()
+ mustRelease = false
+ values_offset int64
+ values_end int64
+ )
+ defer func() {
+ if mustRelease {
+ values.Release()
+ }
+ }()
+
+ if arr.Len() > 0 && voffsets != nil {
+ values_offset, _ = arr.ValueOffsets(0)
+ _, values_end = arr.ValueOffsets(arr.Len() - 1)
+ }
+
+ if arr.Len() != 0 || values_end < int64(values.Len()) {
+ // must also slice the values
+ values = array.NewSlice(values, values_offset, values_end)
+ mustRelease = true
+ }
+ err := w.visit(p, values)
+
+ if err != nil {
+ return fmt.Errorf("could not visit list element for array %T: %w", arr, err)
+ }
+ w.depth++
+
+ case *arrow.ListViewType, *arrow.LargeListViewType:
+ data := arr.Data()
+ arr := arr.(array.VarLenListLike)
+ offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits()
+ rngOff, rngLen := array.RangeOfValuesUsed(arr)
+ voffsets := w.getValueOffsetsAtBaseValue(arr, rngOff)
+ p.body = append(p.body, voffsets)
+
+ vsizes := data.Buffers()[2]
+ if vsizes != nil {
+ if data.Offset() != 0 || vsizes.Len() > offsetTraits.BytesRequired(arr.Len()) {
+ beg := offsetTraits.BytesRequired(data.Offset())
+ end := beg + offsetTraits.BytesRequired(data.Len())
+ vsizes = memory.NewBufferBytes(vsizes.Bytes()[beg:end])
+ } else {
+ vsizes.Retain()
+ }
+ }
+ p.body = append(p.body, vsizes)
+
+ w.depth--
+ var (
+ values = arr.ListValues()
+ mustRelease = false
+ values_offset = int64(rngOff)
+ values_end = int64(rngOff + rngLen)
+ )
+ defer func() {
+ if mustRelease {
+ values.Release()
+ }
+ }()
+
+ if arr.Len() > 0 && values_end < int64(values.Len()) {
+ // must also slice the values
+ values = array.NewSlice(values, values_offset, values_end)
+ mustRelease = true
+ }
+ err := w.visit(p, values)
+
+ if err != nil {
+ return fmt.Errorf("could not visit list element for array %T: %w", arr, err)
+ }
+ w.depth++
+
+ case *arrow.FixedSizeListType:
+ arr := arr.(*array.FixedSizeList)
+
+ w.depth--
+
+ size := int64(arr.DataType().(*arrow.FixedSizeListType).Len())
+ beg := int64(arr.Offset()) * size
+ end := int64(arr.Offset()+arr.Len()) * size
+
+ values := array.NewSlice(arr.ListValues(), beg, end)
+ defer values.Release()
+
+ err := w.visit(p, values)
+
+ if err != nil {
+ return fmt.Errorf("could not visit list element for array %T: %w", arr, err)
+ }
+ w.depth++
+
+ case *arrow.RunEndEncodedType:
+ arr := arr.(*array.RunEndEncoded)
+ w.depth--
+ child := arr.LogicalRunEndsArray(w.mem)
+ defer child.Release()
+ if err := w.visit(p, child); err != nil {
+ return err
+ }
+ child = arr.LogicalValuesArray()
+ defer child.Release()
+ if err := w.visit(p, child); err != nil {
+ return err
+ }
+ w.depth++
+
+ default:
+ panic(fmt.Errorf("arrow/ipc: unknown array %T (dtype=%T)", arr, dtype))
+ }
+
+ return nil
+}
+
// getZeroBasedValueOffsets returns the value-offsets buffer for a
// binary-like or list-like array, rebased so the first offset is zero as
// required when writing an IPC body. It returns nil when the array has no
// offsets buffer or the buffer is empty. The returned buffer is either
// freshly allocated or retained, so the caller must release it.
func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) *memory.Buffer {
	data := arr.Data()
	voffsets := data.Buffers()[1]
	offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits()
	// length+1 offsets are needed to delimit `length` values.
	offsetBytesNeeded := offsetTraits.BytesRequired(data.Len() + 1)

	if voffsets == nil || voffsets.Len() == 0 {
		return nil
	}

	// if we have a non-zero offset, then the value offsets do not start at
	// zero. we must a) create a new offsets array with shifted offsets and
	// b) slice the values array accordingly
	//
	// or if there are more value offsets than values (the array has been sliced)
	// we need to trim off the trailing offsets
	needsTruncateAndShift := data.Offset() != 0 || offsetBytesNeeded < voffsets.Len()

	if needsTruncateAndShift {
		shiftedOffsets := memory.NewResizableBuffer(w.mem)
		shiftedOffsets.Resize(offsetBytesNeeded)

		// the offsets buffer is 8 or 4 bytes wide depending on the type
		// (e.g. LargeString vs String); handle both layouts.
		switch arr.DataType().Layout().Buffers[1].ByteWidth {
		case 8:
			dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes())
			offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1]

			// subtract the first offset from every entry so offsets start at 0
			startOffset := offsets[0]
			for i, o := range offsets {
				dest[i] = o - startOffset
			}

		default:
			debug.Assert(arr.DataType().Layout().Buffers[1].ByteWidth == 4, "invalid offset bytewidth")
			dest := arrow.Int32Traits.CastFromBytes(shiftedOffsets.Bytes())
			offsets := arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()+1]

			startOffset := offsets[0]
			for i, o := range offsets {
				dest[i] = o - startOffset
			}
		}

		voffsets = shiftedOffsets
	} else {
		// zero offset and exact length: share the existing buffer.
		voffsets.Retain()
	}

	return voffsets
}
+
// Truncates the offsets if needed and shifts the values if minOffset > 0.
// The offsets returned are corrected assuming the child values are truncated
// and now start at minOffset.
//
// This function only works on offset buffers of ListViews and LargeListViews.
// TODO(felipecrv): Unify this with getZeroBasedValueOffsets.
func (w *recordEncoder) getValueOffsetsAtBaseValue(arr arrow.Array, minOffset int) *memory.Buffer {
	data := arr.Data()
	voffsets := data.Buffers()[1]
	offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits()
	// unlike getZeroBasedValueOffsets, list-views store exactly `length`
	// offsets (sizes live in a separate buffer), hence no +1 here.
	offsetBytesNeeded := offsetTraits.BytesRequired(data.Len())

	if voffsets == nil || voffsets.Len() == 0 {
		return nil
	}

	// truncate when the array is sliced or carries extra trailing offsets;
	// shift only when the used value range doesn't start at 0.
	needsTruncate := data.Offset() != 0 || offsetBytesNeeded < voffsets.Len()
	needsShift := minOffset > 0

	if needsTruncate || needsShift {
		shiftedOffsets := memory.NewResizableBuffer(w.mem)
		shiftedOffsets.Resize(offsetBytesNeeded)

		// the offsets buffer is 8 or 4 bytes wide depending on the type.
		switch arr.DataType().Layout().Buffers[1].ByteWidth {
		case 8:
			dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes())
			offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()]

			if minOffset > 0 {
				// rebase every offset against the new start of the values
				for i, o := range offsets {
					dest[i] = o - int64(minOffset)
				}
			} else {
				copy(dest, offsets)
			}
		default:
			debug.Assert(arr.DataType().Layout().Buffers[1].ByteWidth == 4, "invalid offset bytewidth")
			dest := arrow.Int32Traits.CastFromBytes(shiftedOffsets.Bytes())
			offsets := arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()]

			if minOffset > 0 {
				for i, o := range offsets {
					dest[i] = o - int32(minOffset)
				}
			} else {
				copy(dest, offsets)
			}
		}

		voffsets = shiftedOffsets
	} else {
		// no truncation or shifting needed: share the existing buffer.
		voffsets.Retain()
	}

	return voffsets
}
+
// rebaseDenseUnionValueOffsets builds a fresh value-offsets buffer for a
// sliced dense-union array, rebasing each entry against the first offset
// seen for its type code. The offsets and lengths slices (indexed by type
// code, pre-filled with -1 / 0 by the caller) are mutated in place:
// offsets[c] becomes the base offset for child c (remains -1 if code c
// never occurs) and lengths[c] the number of child values required, so the
// caller can slice each child accordingly.
func (w *recordEncoder) rebaseDenseUnionValueOffsets(arr *array.DenseUnion, offsets, lengths []int32) *memory.Buffer {
	// this case sucks. Because the offsets are different for each
	// child array, when we have a sliced array, we need to re-base
	// the value offsets for each array! ew.
	unshiftedOffsets := arr.RawValueOffsets()
	codes := arr.RawTypeCodes()

	shiftedOffsetsBuf := memory.NewResizableBuffer(w.mem)
	shiftedOffsetsBuf.Resize(arrow.Int32Traits.BytesRequired(arr.Len()))
	shiftedOffsets := arrow.Int32Traits.CastFromBytes(shiftedOffsetsBuf.Bytes())

	// compute shifted offsets by subtracting child offset
	for i, c := range codes {
		if offsets[c] == -1 {
			// offsets are guaranteed to be increasing according to the spec
			// so the first offset we find for a child is the initial offset
			// and will become the "0" for this child.
			offsets[c] = unshiftedOffsets[i]
			shiftedOffsets[i] = 0
		} else {
			shiftedOffsets[i] = unshiftedOffsets[i] - offsets[c]
		}
		// track how far into child c we must read to cover this entry
		lengths[c] = maxI32(lengths[c], shiftedOffsets[i]+1)
	}
	return shiftedOffsetsBuf
}
+
// Encode writes the record batch rec into the payload p: first the body
// buffers (via encode), then the record-batch metadata message.
func (w *recordEncoder) Encode(p *Payload, rec arrow.Record) error {
	if err := w.encode(p, rec); err != nil {
		return err
	}
	return w.encodeMetadata(p, rec.NumRows())
}
+
// encodeMetadata serializes the record-batch message header (row count,
// body size, field nodes, custom metadata, compression codec) into p.meta.
func (w *recordEncoder) encodeMetadata(p *Payload, nrows int64) error {
	p.meta = writeRecordMessage(w.mem, nrows, p.size, w.fields, w.meta, w.codec)
	return nil
}
+
+func newTruncatedBitmap(mem memory.Allocator, offset, length int64, input *memory.Buffer) *memory.Buffer {
+ if input == nil {
+ return nil
+ }
+
+ minLength := paddedLength(bitutil.BytesForBits(length), kArrowAlignment)
+ switch {
+ case offset != 0 || minLength < int64(input.Len()):
+ // with a sliced array / non-zero offset, we must copy the bitmap
+ buf := memory.NewResizableBuffer(mem)
+ buf.Resize(int(minLength))
+ bitutil.CopyBitmap(input.Bytes(), int(offset), int(length), buf.Bytes(), 0)
+ return buf
+ default:
+ input.Retain()
+ return input
+ }
+}
+
+func getTruncatedBuffer(offset, length int64, byteWidth int32, buf *memory.Buffer) *memory.Buffer {
+ if buf == nil {
+ return buf
+ }
+
+ paddedLen := paddedLength(length*int64(byteWidth), kArrowAlignment)
+ if offset != 0 || paddedLen < int64(buf.Len()) {
+ return memory.SliceBuffer(buf, int(offset*int64(byteWidth)), int(minI64(paddedLen, int64(buf.Len()))))
+ }
+ buf.Retain()
+ return buf
+}
+
+func needTruncate(offset int64, buf *memory.Buffer, minLength int64) bool {
+ if buf == nil {
+ return false
+ }
+ return offset != 0 || minLength < int64(buf.Len())
+}
+
// minI64 returns the smaller of a and b.
func minI64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
+
// maxI32 returns the larger of a and b.
func maxI32(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile
new file mode 100644
index 000000000..1cc4079c4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile
@@ -0,0 +1,66 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GO_BUILD=go build
GO_GEN=go generate
GO_TEST=go test
GOPATH=$(realpath ../../../..)

# this converts rotate instructions from "ro[lr] <reg>" -> "ro[lr] <reg>, 1" for yasm compatibility
PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/'

C2GOASM=c2goasm -a -f
CC=clang
C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 -fno-asynchronous-unwind-tables \
	-fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib
ASM_FLAGS_AVX2=-mavx2 -mfma -mllvm -force-vector-width=32
ASM_FLAGS_SSE3=-msse3
ASM_FLAGS_SSE4=-msse4

C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \
	-fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib

# -print is required on the match branch: with a bare `-prune -o <match>`
# expression the implicit -print applies to the whole expression and the
# pruned ./_lib directory itself would be listed too.
GO_SOURCES  := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go' -print)
# match .go OR .s files; the predicates must be grouped, otherwise
# `-name '*.go' -name '*.s'` is an AND that can never match anything.
ALL_SOURCES := $(shell find . -path ./_lib -prune -o \( -name '*.go' -o -name '*.s' \) -not -name '*_test.go' -print)


INTEL_SOURCES := \
	memory_avx2_amd64.s memory_sse4_amd64.s

# .PHONY (not "PHONEY") is the special target that marks `assembly` as
# always out of date.
.PHONY: assembly

#
# ARROW-15320: DO NOT add the assembly target for Arm64 (ARM_SOURCES) until c2goasm added the Arm64 support.
# memory_neon_arm64.s were generated by asm2plan9s.
# And manually formatted it as the Arm64 Plan9.
#

assembly: $(INTEL_SOURCES)

_lib/memory_avx2.s: _lib/memory.c
	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@

_lib/memory_sse4.s: _lib/memory.c
	$(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@

_lib/memory_neon.s: _lib/memory.c
	$(CC) -S $(C_FLAGS_NEON) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@

# $(C2GOASM) already carries -a -f; don't repeat them
memory_avx2_amd64.s: _lib/memory_avx2.s
	$(C2GOASM) $^ $@

memory_sse4_amd64.s: _lib/memory_sse4.s
	$(C2GOASM) $^ $@
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go
new file mode 100644
index 000000000..1427190ea
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go
@@ -0,0 +1,27 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
const (
	// alignment is the byte alignment used for allocations (64 bytes,
	// matching the alignment recommended by the Arrow format).
	alignment = 64
)

// Allocator is the interface implemented by memory managers that hand out
// and reclaim the byte slices backing Arrow buffers.
type Allocator interface {
	// Allocate returns a new byte slice of the given size.
	Allocate(size int) []byte
	// Reallocate resizes b to the given size, possibly moving the data.
	Reallocate(size int, b []byte) []byte
	// Free releases the memory held by b back to the allocator.
	Free(b []byte)
}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go
new file mode 100644
index 000000000..2ddb3f829
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+import (
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
// Buffer is a wrapper type for a buffer of bytes.
type Buffer struct {
	refCount int64     // reference count, updated atomically; only meaningful when mem or parent is set
	buf      []byte    // backing storage; len(buf) is the capacity
	length   int       // logical length returned by Len/Bytes
	mutable  bool      // set for allocator-backed (resizable) buffers
	mem      Allocator // owning allocator; nil for fixed byte-slice buffers

	parent *Buffer // non-nil when this buffer was sliced from another buffer
}
+
// NewBufferBytes creates a fixed-size buffer from the specified data.
// The buffer does not own its memory and is not reference counted
// (Retain/Release are no-ops for it).
func NewBufferBytes(data []byte) *Buffer {
	return &Buffer{refCount: 0, buf: data, length: len(data)}
}

// NewResizableBuffer creates a mutable, resizable buffer with an Allocator for managing memory.
// The returned buffer starts with a reference count of 1.
func NewResizableBuffer(mem Allocator) *Buffer {
	return &Buffer{refCount: 1, mutable: true, mem: mem}
}

// SliceBuffer returns a new buffer viewing bytes [offset, offset+length) of
// buf. The parent buffer is retained; it is released again when the slice's
// own reference count drops to zero.
func SliceBuffer(buf *Buffer, offset, length int) *Buffer {
	buf.Retain()
	return &Buffer{refCount: 1, parent: buf, buf: buf.Bytes()[offset : offset+length], length: length}
}
+
// Parent returns either nil or a pointer to the parent buffer if this buffer
// was sliced from another.
func (b *Buffer) Parent() *Buffer { return b.parent }

// Retain increases the reference count by 1.
// It is a no-op for buffers that are neither allocator-backed nor slices
// (e.g. those created by NewBufferBytes), which are not reference counted.
func (b *Buffer) Retain() {
	if b.mem != nil || b.parent != nil {
		atomic.AddInt64(&b.refCount, 1)
	}
}
+
// Release decreases the reference count by 1.
// When the reference count goes to zero, the memory is freed.
// Like Retain, it is a no-op for buffers that are neither allocator-backed
// nor slices of another buffer.
func (b *Buffer) Release() {
	if b.mem != nil || b.parent != nil {
		debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")

		if atomic.AddInt64(&b.refCount, -1) == 0 {
			if b.mem != nil {
				// allocator-owned memory is returned to the allocator
				b.mem.Free(b.buf)
			} else {
				// a slice owns no memory: release the parent it retained
				b.parent.Release()
				b.parent = nil
			}
			b.buf, b.length = nil, 0
		}
	}
}
+
// Reset resets the buffer for reuse, pointing it at buf and releasing any
// retained parent.
// NOTE(review): allocator-owned memory (b.mem != nil) is not freed here —
// presumably callers only Reset non-owning buffers; confirm before relying
// on this for resizable buffers.
func (b *Buffer) Reset(buf []byte) {
	if b.parent != nil {
		b.parent.Release()
		b.parent = nil
	}
	b.buf = buf
	b.length = len(buf)
}
+
// Buf returns the slice of memory allocated by the Buffer, which is adjusted by calling Reserve.
func (b *Buffer) Buf() []byte { return b.buf }

// Bytes returns a slice of size Len, which is adjusted by calling Resize.
func (b *Buffer) Bytes() []byte { return b.buf[:b.length] }

// Mutable returns a bool indicating whether the buffer is mutable or not.
func (b *Buffer) Mutable() bool { return b.mutable }

// Len returns the length of the buffer.
func (b *Buffer) Len() int { return b.length }

// Cap returns the capacity of the buffer (the length of the backing slice,
// which may exceed Len).
func (b *Buffer) Cap() int { return len(b.buf) }
+
// Reserve reserves the provided amount of capacity for the buffer,
// growing the backing storage (rounded up to a multiple of 64 bytes) if
// needed. It never shrinks and does not change Len.
func (b *Buffer) Reserve(capacity int) {
	if capacity > len(b.buf) {
		newCap := roundUpToMultipleOf64(capacity)
		if len(b.buf) == 0 {
			// first allocation
			b.buf = b.mem.Allocate(newCap)
		} else {
			b.buf = b.mem.Reallocate(newCap, b.buf)
		}
	}
}
+
// Resize resizes the buffer to the target size, shrinking the backing
// storage if the new size is smaller.
func (b *Buffer) Resize(newSize int) {
	b.resize(newSize, true)
}

// ResizeNoShrink resizes the buffer to the target size, but will not
// shrink it.
func (b *Buffer) ResizeNoShrink(newSize int) {
	b.resize(newSize, false)
}
+
// resize implements Resize/ResizeNoShrink: it sets Len to newSize, growing
// capacity via Reserve when needed, and (when shrink is true) reallocating
// down to the 64-byte-rounded size — freeing entirely when newSize is 0.
func (b *Buffer) resize(newSize int, shrink bool) {
	if !shrink || newSize > b.length {
		b.Reserve(newSize)
	} else {
		// Buffer is not growing, so shrink to the requested size without
		// excess space.
		newCap := roundUpToMultipleOf64(newSize)
		if len(b.buf) != newCap {
			if newSize == 0 {
				b.mem.Free(b.buf)
				b.buf = nil
			} else {
				b.buf = b.mem.Reallocate(newCap, b.buf)
			}
		}
	}
	b.length = newSize
}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go
new file mode 100644
index 000000000..85ee44521
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+// +build ccalloc
+
+package memory
+
+import (
+ "runtime"
+
+ cga "github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc"
+)
+
+// CgoArrowAllocator is an allocator which exposes the C++ memory pool class
+// from the Arrow C++ Library as an allocator for memory buffers to use in Go.
+// The build tag 'ccalloc' must be used in order to include it as it requires
+// linking against the arrow library.
+//
+// The primary reason to use this would be as an allocator when dealing with
+// exporting data across the cdata interface in order to ensure that the memory
+// is allocated safely on the C side so it can be held on the CGO side beyond
+// the context of a single function call. If the memory in use isn't allocated
+// on the C side, then it is not safe for any pointers to data to be held outside
+// of Go beyond the context of a single Cgo function call as it will be invisible
+// to the Go garbage collector and could potentially get moved without being updated.
+//
+// As an alternative, if the arrow C++ libraries aren't available, remember that
+// Allocator is an interface, so anything which can allocate data using C/C++ can
+// be exposed and then used to meet the Allocator interface if wanting to export data
+// across the Cgo interfaces.
type CgoArrowAllocator struct {
	pool cga.CGOMemPool // handle to the C++ memory pool backing this allocator
}

// Allocate does what it says on the tin, allocates a chunk of memory using the underlying
// memory pool, however CGO calls are 'relatively' expensive, which means doing tons of
// small allocations can end up being expensive and potentially slower than just using
// go memory. This means that preallocating via reserve becomes much more important when
// using this allocator.
//
// Future development TODO: look into converting this more into a slab style allocator
// which amortizes the cost of smaller allocations by allocating bigger chunks of memory
// and passes them out.
func (alloc *CgoArrowAllocator) Allocate(size int) []byte {
	b := cga.CgoPoolAlloc(alloc.pool, size)
	return b
}

// Free returns b to the underlying C++ memory pool.
func (alloc *CgoArrowAllocator) Free(b []byte) {
	cga.CgoPoolFree(alloc.pool, b)
}

// Reallocate resizes b to size via the underlying C++ memory pool,
// zero-filling any newly added bytes to match Go allocation semantics.
func (alloc *CgoArrowAllocator) Reallocate(size int, b []byte) []byte {
	oldSize := len(b)
	out := cga.CgoPoolRealloc(alloc.pool, size, b)

	if size > oldSize {
		// zero initialize the slice like go would do normally
		// C won't zero initialize the memory.
		Set(out[oldSize:], 0)
	}
	return out
}

// AllocatedBytes returns the current total of bytes that have been allocated by
// the memory pool on the C++ side.
func (alloc *CgoArrowAllocator) AllocatedBytes() int64 {
	return cga.CgoPoolCurBytes(alloc.pool)
}

// AssertSize can be used for testing to ensure and check that there are no memory
// leaks using the allocator.
func (alloc *CgoArrowAllocator) AssertSize(t TestingT, sz int) {
	cur := alloc.AllocatedBytes()
	if int64(sz) != cur {
		t.Helper()
		t.Errorf("invalid memory size exp=%d, got=%d", sz, cur)
	}
}

// NewCgoArrowAllocator creates a new allocator which is backed by the C++ Arrow
// memory pool object which could potentially be using jemalloc or mimalloc or
// otherwise as its backend. Memory allocated by this is invisible to the Go
// garbage collector, and as such care should be taken to avoid any memory leaks.
//
// A finalizer is set on the allocator so when the allocator object itself is eventually
// cleaned up by the garbage collector, it will delete the associated C++ memory pool
// object. If the build tag 'cclog' is added, then the memory pool will output a log line
// for every time memory is allocated, freed or reallocated.
func NewCgoArrowAllocator() *CgoArrowAllocator {
	alloc := &CgoArrowAllocator{pool: cga.NewCgoArrowAllocator(enableLogging)}
	runtime.SetFinalizer(alloc, func(a *CgoArrowAllocator) { cga.ReleaseCGOMemPool(a.pool) })
	return alloc
}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go
new file mode 100644
index 000000000..501431a0e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+// +build ccalloc
+// +build !cclog
+
+package memory
+
+const enableLogging = false
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go
new file mode 100644
index 000000000..01ad6b394
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+// +build ccalloc
+// +build cclog
+
+package memory
+
+const enableLogging = true
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go
new file mode 100644
index 000000000..78a09a57d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go
@@ -0,0 +1,221 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !tinygo
+// +build !tinygo
+
+package memory
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
// CheckedAllocator wraps another Allocator and tracks every outstanding
// allocation together with its call site, so tests can detect leaks with
// AssertSize and inspect usage with CurrentAlloc.
type CheckedAllocator struct {
	mem Allocator // the wrapped allocator doing the real work
	sz  int64     // outstanding allocated bytes; updated atomically

	allocs sync.Map // live allocations: base address (uintptr) -> *dalloc
}

// NewCheckedAllocator wraps mem in a CheckedAllocator.
func NewCheckedAllocator(mem Allocator) *CheckedAllocator {
	return &CheckedAllocator{mem: mem}
}

// CurrentAlloc returns the number of bytes currently allocated and not yet freed.
func (a *CheckedAllocator) CurrentAlloc() int { return int(atomic.LoadInt64(&a.sz)) }
+
// Allocate allocates size bytes via the wrapped allocator and records the
// caller's location (and, if configured, stack frames) so unreleased
// allocations can be reported by AssertSize. Zero-size allocations are not
// tracked (they have no addressable first byte).
func (a *CheckedAllocator) Allocate(size int) []byte {
	atomic.AddInt64(&a.sz, int64(size))
	out := a.mem.Allocate(size)
	if size == 0 {
		return out
	}

	// key the tracking entry by the allocation's base address
	ptr := uintptr(unsafe.Pointer(&out[0]))
	pcs := make([]uintptr, maxRetainedFrames)

	// For historical reasons the meaning of the skip argument
	// differs between Caller and Callers. For Callers, 0 identifies
	// the frame for the caller itself. We skip 2 additional frames
	// here to get to the caller right before the call to Allocate.
	runtime.Callers(allocFrames+2, pcs)
	callersFrames := runtime.CallersFrames(pcs)
	if pc, _, l, ok := runtime.Caller(allocFrames); ok {
		a.allocs.Store(ptr, &dalloc{pc: pc, line: l, sz: size, callersFrames: callersFrames})
	}
	return out
}
+
+func (a *CheckedAllocator) Reallocate(size int, b []byte) []byte {
+ atomic.AddInt64(&a.sz, int64(size-len(b)))
+
+ oldptr := uintptr(unsafe.Pointer(&b[0]))
+ out := a.mem.Reallocate(size, b)
+ if size == 0 {
+ return out
+ }
+
+ newptr := uintptr(unsafe.Pointer(&out[0]))
+ a.allocs.Delete(oldptr)
+ pcs := make([]uintptr, maxRetainedFrames)
+
+ // For historical reasons the meaning of the skip argument
+ // differs between Caller and Callers. For Callers, 0 identifies
+ // the frame for the caller itself. We skip 2 additional frames
+ // here to get to the caller right before the call to Reallocate.
+ runtime.Callers(reallocFrames+2, pcs)
+ callersFrames := runtime.CallersFrames(pcs)
+ if pc, _, l, ok := runtime.Caller(reallocFrames); ok {
+ a.allocs.Store(newptr, &dalloc{pc: pc, line: l, sz: size, callersFrames: callersFrames})
+ }
+
+ return out
+}
+
// Free releases b via the wrapped allocator, removes its tracking entry,
// and subtracts its length from the outstanding byte count. Zero-length
// slices were never tracked, so there is nothing to delete for them.
func (a *CheckedAllocator) Free(b []byte) {
	atomic.AddInt64(&a.sz, int64(len(b)*-1))
	defer a.mem.Free(b)

	if len(b) == 0 {
		return
	}

	ptr := uintptr(unsafe.Pointer(&b[0]))
	a.allocs.Delete(ptr)
}
+
+// typically the allocations are happening in memory.Buffer, not by consumers calling
+// allocate/reallocate directly. As a result, we want to skip the caller frames
+// of the inner workings of Buffer in order to find the caller that actually triggered
+// the allocation via a call to Resize/Reserve/etc.
+const (
+ defAllocFrames = 4
+ defReallocFrames = 3
+ defMaxRetainedFrames = 0
+)
+
+// Use the environment variables ARROW_CHECKED_ALLOC_FRAMES and ARROW_CHECKED_REALLOC_FRAMES
+// to control how many frames it skips when storing the caller for allocations/reallocs
+// when using this to find memory leaks. Use ARROW_CHECKED_MAX_RETAINED_FRAMES to control how
+// many frames are retained for printing the stack trace of a leak.
+var allocFrames, reallocFrames, maxRetainedFrames int = defAllocFrames, defReallocFrames, defMaxRetainedFrames
+
+func init() {
+ if val, ok := os.LookupEnv("ARROW_CHECKED_ALLOC_FRAMES"); ok {
+ if f, err := strconv.Atoi(val); err == nil {
+ allocFrames = f
+ }
+ }
+
+ if val, ok := os.LookupEnv("ARROW_CHECKED_REALLOC_FRAMES"); ok {
+ if f, err := strconv.Atoi(val); err == nil {
+ reallocFrames = f
+ }
+ }
+
+ if val, ok := os.LookupEnv("ARROW_CHECKED_MAX_RETAINED_FRAMES"); ok {
+ if f, err := strconv.Atoi(val); err == nil {
+ maxRetainedFrames = f
+ }
+ }
+}
+
// dalloc records one outstanding allocation: the call site (pc/line), the
// size in bytes, and the retained stack frames used for leak reports.
type dalloc struct {
	pc            uintptr
	line          int
	sz            int
	callersFrames *runtime.Frames
}

// TestingT is the subset of *testing.T that AssertSize needs, so other
// test harnesses can be plugged in.
type TestingT interface {
	Errorf(format string, args ...interface{})
	Helper()
}
+
+func (a *CheckedAllocator) AssertSize(t TestingT, sz int) {
+ a.allocs.Range(func(_, value interface{}) bool {
+ info := value.(*dalloc)
+ f := runtime.FuncForPC(info.pc)
+ frames := info.callersFrames
+ var callersMsg strings.Builder
+ for {
+ frame, more := frames.Next()
+ if frame.Line == 0 {
+ break
+ }
+ callersMsg.WriteString("\t")
+ // frame.Func is a useful source of information if it's present.
+ // It may be nil for non-Go code or fully inlined functions.
+ if fn := frame.Func; fn != nil {
+ // format as func name + the offset in bytes from func entrypoint
+ callersMsg.WriteString(fmt.Sprintf("%s+%x", fn.Name(), frame.PC-fn.Entry()))
+ } else {
+ // fallback to outer func name + file line
+ callersMsg.WriteString(fmt.Sprintf("%s, line %d", frame.Function, frame.Line))
+ }
+
+ // Write a proper file name + line, so it's really easy to find the leak
+ callersMsg.WriteString("\n\t\t")
+ callersMsg.WriteString(frame.File + ":" + strconv.Itoa(frame.Line))
+ callersMsg.WriteString("\n")
+ if !more {
+ break
+ }
+ }
+
+ file, line := f.FileLine(info.pc)
+ t.Errorf("LEAK of %d bytes FROM\n\t%s+%x\n\t\t%s:%d\n%v",
+ info.sz,
+ f.Name(), info.pc-f.Entry(), // func name + offset in bytes between frame & entrypoint to func
+ file, line, // a proper file name + line, so it's really easy to find the leak
+ callersMsg.String(),
+ )
+ return true
+ })
+
+ if int(atomic.LoadInt64(&a.sz)) != sz {
+ t.Helper()
+ t.Errorf("invalid memory size exp=%d, got=%d", sz, a.sz)
+ }
+}
+
// CheckedAllocatorScope snapshots the outstanding byte count of a
// CheckedAllocator so a test can later verify, with CheckSize, that a
// region of code released everything it allocated.
type CheckedAllocatorScope struct {
	alloc *CheckedAllocator // the allocator being observed
	sz    int               // outstanding bytes at the time the scope was created
}

// NewCheckedAllocatorScope captures the current outstanding byte count of alloc.
func NewCheckedAllocatorScope(alloc *CheckedAllocator) *CheckedAllocatorScope {
	sz := atomic.LoadInt64(&alloc.sz)
	return &CheckedAllocatorScope{alloc: alloc, sz: int(sz)}
}

// CheckSize fails the test if the allocator's outstanding byte count has
// changed since the scope was created.
func (c *CheckedAllocatorScope) CheckSize(t TestingT) {
	sz := int(atomic.LoadInt64(&c.alloc.sz))
	if c.sz != sz {
		t.Helper()
		t.Errorf("invalid memory size exp=%d, got=%d", c.sz, sz)
	}
}
+
+var (
+ _ Allocator = (*CheckedAllocator)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go
new file mode 100644
index 000000000..f60caccdb
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go
@@ -0,0 +1,25 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !mallocator || !cgo
+
+package memory
+
+// DefaultAllocator is a default implementation of Allocator and can be used anywhere
+// an Allocator is required.
+//
+// DefaultAllocator is safe to use from multiple goroutines.
+var DefaultAllocator Allocator = NewGoAllocator()
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go
new file mode 100644
index 000000000..12ad08466
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go
@@ -0,0 +1,29 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build mallocator && cgo
+
+package memory
+
+import (
+ "github.com/apache/arrow/go/v14/arrow/memory/mallocator"
+)
+
+// DefaultAllocator is a default implementation of Allocator and can be used anywhere
+// an Allocator is required.
+//
+// DefaultAllocator is safe to use from multiple goroutines.
+var DefaultAllocator Allocator = mallocator.NewMallocator()
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go
new file mode 100644
index 000000000..20a28e4e2
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go
@@ -0,0 +1,22 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package memory provides support for allocating and manipulating memory at a low level.
+
+The build tag 'mallocator' will switch the default allocator to one backed by libc malloc. This also requires CGO.
+*/
+package memory
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go
new file mode 100644
index 000000000..1017eb688
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go
@@ -0,0 +1,47 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+type GoAllocator struct{}
+
+func NewGoAllocator() *GoAllocator { return &GoAllocator{} }
+
+func (a *GoAllocator) Allocate(size int) []byte {
+ buf := make([]byte, size+alignment) // padding for 64-byte alignment
+ addr := int(addressOf(buf))
+ next := roundUpToMultipleOf64(addr)
+ if addr != next {
+ shift := next - addr
+ return buf[shift : size+shift : size+shift]
+ }
+ return buf[:size:size]
+}
+
+func (a *GoAllocator) Reallocate(size int, b []byte) []byte {
+ if cap(b) >= size {
+ return b[:size]
+ }
+ newBuf := a.Allocate(size)
+ copy(newBuf, b)
+ return newBuf
+}
+
+func (a *GoAllocator) Free(b []byte) {}
+
+var (
+ _ Allocator = (*GoAllocator)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc
new file mode 100644
index 000000000..b2b037374
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// +build ccalloc
+
+#include "allocator.h"
+#include "arrow/memory_pool.h"
+#include "helpers.h"
+
+struct mem_holder {
+ std::unique_ptr<arrow::MemoryPool> owned_pool;
+ arrow::MemoryPool* pool;
+};
+
+ArrowMemoryPool arrow_create_memory_pool(bool enable_logging) {
+ auto holder = std::make_shared<mem_holder>();
+ if (enable_logging) {
+ holder->owned_pool.reset(new arrow::LoggingMemoryPool(arrow::default_memory_pool()));
+ holder->pool = holder->owned_pool.get();
+ } else {
+ holder->pool = arrow::default_memory_pool();
+ }
+
+ return create_ref(holder);
+}
+
+void arrow_release_pool(ArrowMemoryPool pool) {
+ release_ref<mem_holder>(pool);
+}
+
+int arrow_pool_allocate(ArrowMemoryPool pool, int64_t size, uint8_t** out) {
+ auto holder = retrieve_instance<mem_holder>(pool);
+ auto status = holder->pool->Allocate(size, out);
+ if (!status.ok()) {
+ return 1;
+ }
+ return 0;
+}
+
+void arrow_pool_free(ArrowMemoryPool pool, uint8_t* buffer, int64_t size) {
+ auto holder = retrieve_instance<mem_holder>(pool);
+ holder->pool->Free(buffer, size);
+}
+
+int arrow_pool_reallocate(ArrowMemoryPool pool, int64_t old_size, int64_t new_size, uint8_t** ptr) {
+ auto holder = retrieve_instance<mem_holder>(pool);
+ auto status = holder->pool->Reallocate(old_size, new_size, ptr);
+ if (!status.ok()) {
+ return 1;
+ }
+ return 0;
+}
+
+int64_t arrow_pool_bytes_allocated(ArrowMemoryPool pool) {
+ auto holder = retrieve_instance<mem_holder>(pool);
+ return holder->pool->bytes_allocated();
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go
new file mode 100644
index 000000000..48f34d862
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go
@@ -0,0 +1,108 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build ccalloc
+// +build ccalloc
+
+package cgoalloc
+
+// #cgo !windows pkg-config: arrow
+// #cgo CXXFLAGS: -std=c++17
+// #cgo windows LDFLAGS: -larrow
+// #include "allocator.h"
+import "C"
+import (
+ "reflect"
+ "unsafe"
+)
+
+// CGOMemPool is an alias to the typedef'd uintptr from the allocator.h file
+type CGOMemPool = C.ArrowMemoryPool
+
+// CgoPoolAlloc allocates a block of memory of length 'size' using the memory
+// pool that is passed in.
+func CgoPoolAlloc(pool CGOMemPool, size int) []byte {
+ var ret []byte
+ if size == 0 {
+ return ret
+ }
+
+ var out *C.uint8_t
+ C.arrow_pool_allocate(pool, C.int64_t(size), (**C.uint8_t)(unsafe.Pointer(&out)))
+
+ s := (*reflect.SliceHeader)(unsafe.Pointer(&ret))
+ s.Data = uintptr(unsafe.Pointer(out))
+ s.Len = size
+ s.Cap = size
+
+ return ret
+}
+
+// CgoPoolRealloc calls 'reallocate' on the block of memory passed in which must
+// be a slice that was returned by CgoPoolAlloc or CgoPoolRealloc.
+func CgoPoolRealloc(pool CGOMemPool, size int, b []byte) []byte {
+ if len(b) == 0 {
+ return CgoPoolAlloc(pool, size)
+ }
+
+ oldSize := C.int64_t(len(b))
+ data := (*C.uint8_t)(unsafe.Pointer(&b[0]))
+ C.arrow_pool_reallocate(pool, oldSize, C.int64_t(size), &data)
+
+ var ret []byte
+ s := (*reflect.SliceHeader)(unsafe.Pointer(&ret))
+ s.Data = uintptr(unsafe.Pointer(data))
+ s.Len = size
+ s.Cap = size
+
+ return ret
+}
+
+// CgoPoolFree uses the indicated memory pool to free a block of memory. The
+// slice passed in *must* be a slice which was returned by CgoPoolAlloc or
+// CgoPoolRealloc.
+func CgoPoolFree(pool CGOMemPool, b []byte) {
+ if len(b) == 0 {
+ return
+ }
+
+ oldSize := C.int64_t(len(b))
+ data := (*C.uint8_t)(unsafe.Pointer(&b[0]))
+ C.arrow_pool_free(pool, data, oldSize)
+}
+
+// CgoPoolCurBytes returns the current number of bytes allocated by the
+// passed in memory pool.
+func CgoPoolCurBytes(pool CGOMemPool) int64 {
+ return int64(C.arrow_pool_bytes_allocated(pool))
+}
+
+// ReleaseCGOMemPool deletes and frees the memory associated with the
+// passed in memory pool on the C++ side.
+func ReleaseCGOMemPool(pool CGOMemPool) {
+ C.arrow_release_pool(pool)
+}
+
+// NewCgoArrowAllocator constructs a new memory pool in C++ and returns
+// a reference to it which can then be used with the other functions
+// here in order to use it.
+//
+// Optionally if logging is true, a logging proxy will be wrapped around
+// the memory pool so that it will output a line every time memory is
+// allocated, reallocated or freed along with the size of the allocation.
+func NewCgoArrowAllocator(logging bool) CGOMemPool {
+ return C.arrow_create_memory_pool(C.bool(logging))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h
new file mode 100644
index 000000000..0c8744375
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h
@@ -0,0 +1,39 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uintptr_t ArrowMemoryPool;
+
+ArrowMemoryPool arrow_create_memory_pool(bool enable_logging);
+int arrow_pool_allocate(ArrowMemoryPool pool, int64_t size, uint8_t** out);
+int arrow_pool_reallocate(ArrowMemoryPool pool, int64_t old_size, int64_t new_size, uint8_t** ptr);
+void arrow_pool_free(ArrowMemoryPool pool, uint8_t* buffer, int64_t size);
+int64_t arrow_pool_bytes_allocated(ArrowMemoryPool pool);
+void arrow_release_pool(ArrowMemoryPool pool);
+
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h
new file mode 100644
index 000000000..fa5feb6a9
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+#include <memory>
+
+// helper functions to be included by C++ code for interacting with Cgo
+
+// create_ref will construct a shared_ptr on the heap and return a pointer
+// to it. the returned uintptr_t can then be used with retrieve_instance
+// to get back the shared_ptr and object it refers to. This ensures that
+// the object outlives the exported function so that Go can use it.
+template <typename T>
+uintptr_t create_ref(std::shared_ptr<T> t) {
+ std::shared_ptr<T>* retained_ptr = new std::shared_ptr<T>(t);
+ return reinterpret_cast<uintptr_t>(retained_ptr);
+}
+
+// retrieve_instance is used to get back the shared_ptr which was created with
+// create_ref in order to use it in functions where the caller passes back the
+// uintptr_t so that an object can be managed by C++ while a reference to it
+// is passed around in C/CGO
+template <typename T>
+std::shared_ptr<T> retrieve_instance(uintptr_t ref) {
+ std::shared_ptr<T>* retrieved_ptr = reinterpret_cast<std::shared_ptr<T>*>(ref);
+ return *retrieved_ptr;
+}
+
+// release_ref deletes the shared_ptr that was created by create_ref, freeing the
+// object if it was the last shared_ptr which referenced it, as per normal smart-pointer
+// rules.
+template <typename T>
+void release_ref(uintptr_t ref) {
+ std::shared_ptr<T>* retrieved_ptr = reinterpret_cast<std::shared_ptr<T>*>(ref);
+ delete retrieved_ptr;
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go
new file mode 100644
index 000000000..a399d85ee
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go
@@ -0,0 +1,21 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// Package mallocator defines an allocator implementation for
+// memory.Allocator which defers to libc malloc. It requires
+// usage of CGO.
+package mallocator
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go
new file mode 100644
index 000000000..18e0377c4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go
@@ -0,0 +1,115 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package mallocator
+
+// #include <stdlib.h>
+// #include <string.h>
+//
+// void* realloc_and_initialize(void* ptr, size_t old_len, size_t new_len) {
+// void* new_ptr = realloc(ptr, new_len);
+// if (new_ptr && new_len > old_len) {
+// memset(new_ptr + old_len, 0, new_len - old_len);
+// }
+// return new_ptr;
+// }
+import "C"
+
+import (
+ "reflect"
+ "sync/atomic"
+ "unsafe"
+)
+
+// Mallocator is an allocator which defers to libc malloc.
+//
+// The primary reason to use this is when exporting data across the C Data
+// Interface. CGO requires that pointers to Go memory are not stored in C
+// memory, which is exactly what the C Data Interface would otherwise
+// require. By allocating with Mallocator up front, we can safely export the
+// buffers in Arrow arrays without copying buffers or violating CGO rules.
+//
+// The build tag 'mallocator' will also make this the default allocator.
+type Mallocator struct {
+ allocatedBytes uint64
+}
+
+func NewMallocator() *Mallocator { return &Mallocator{} }
+
+func (alloc *Mallocator) Allocate(size int) []byte {
+ // Use calloc to zero-initialize memory.
+ // > ...the current implementation may sometimes cause a runtime error if the
+ // > contents of the C memory appear to be a Go pointer. Therefore, avoid
+ // > passing uninitialized C memory to Go code if the Go code is going to store
+ // > pointer values in it. Zero out the memory in C before passing it to Go.
+ if size < 0 {
+ panic("mallocator: negative size")
+ }
+ ptr, err := C.calloc(C.size_t(size), 1)
+ if err != nil {
+ panic(err)
+ } else if ptr == nil {
+ panic("mallocator: out of memory")
+ }
+ atomic.AddUint64(&alloc.allocatedBytes, uint64(size))
+ return unsafe.Slice((*byte)(ptr), size)
+}
+
+func (alloc *Mallocator) Free(b []byte) {
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ C.free(unsafe.Pointer(sh.Data))
+ // Subtract sh.Len via two's complement (since atomic doesn't offer subtract)
+ atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(sh.Len) - 1))
+}
+
+func (alloc *Mallocator) Reallocate(size int, b []byte) []byte {
+ if size < 0 {
+ panic("mallocator: negative size")
+ }
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ ptr, err := C.realloc_and_initialize(unsafe.Pointer(sh.Data), C.size_t(sh.Cap), C.size_t(size))
+ if err != nil {
+ panic(err)
+ } else if ptr == nil && size != 0 {
+ panic("mallocator: out of memory")
+ }
+ delta := size - len(b)
+ if delta >= 0 {
+ atomic.AddUint64(&alloc.allocatedBytes, uint64(delta))
+ } else {
+ atomic.AddUint64(&alloc.allocatedBytes, ^(uint64(-delta) - 1))
+ }
+ return unsafe.Slice((*byte)(ptr), size)
+}
+
+func (alloc *Mallocator) AllocatedBytes() int64 {
+ return int64(alloc.allocatedBytes)
+}
+
+// Duplicate interface to avoid circular import
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+ Helper()
+}
+
+func (alloc *Mallocator) AssertSize(t TestingT, sz int) {
+ cur := alloc.AllocatedBytes()
+ if int64(sz) != cur {
+ t.Helper()
+ t.Errorf("invalid memory size exp=%d, got=%d", sz, cur)
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go
new file mode 100644
index 000000000..43627f5ed
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+var (
+ memset func(b []byte, c byte) = memory_memset_go
+)
+
+// Set assigns the value c to every element of the slice buf.
+func Set(buf []byte, c byte) {
+ memset(buf, c)
+}
+
+// memory_memset_go reference implementation
+func memory_memset_go(buf []byte, c byte) {
+ for i := 0; i < len(buf); i++ {
+ buf[i] = c
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go
new file mode 100644
index 000000000..58356d648
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !noasm
+
+package memory
+
+import (
+ "golang.org/x/sys/cpu"
+)
+
+func init() {
+ if cpu.X86.HasAVX2 {
+ memset = memory_memset_avx2
+ } else if cpu.X86.HasSSE42 {
+ memset = memory_memset_sse4
+ } else {
+ memset = memory_memset_go
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go
new file mode 100644
index 000000000..3db5d1101
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !noasm
+
+package memory
+
+import (
+ "golang.org/x/sys/cpu"
+)
+
+func init() {
+ if cpu.ARM64.HasASIMD {
+ memset = memory_memset_neon
+ } else {
+ memset = memory_memset_go
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go
new file mode 100644
index 000000000..2bd851ea5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go
@@ -0,0 +1,41 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !noasm
+
+package memory
+
+import "unsafe"
+
+//go:noescape
+func _memset_avx2(buf unsafe.Pointer, len, c uintptr)
+
+func memory_memset_avx2(buf []byte, c byte) {
+ if len(buf) == 0 {
+ return
+ }
+
+ var (
+ p1 = unsafe.Pointer(&buf[0])
+ p2 = uintptr(len(buf))
+ p3 = uintptr(c)
+ )
+ if len(buf) > 2000 || isMultipleOfPowerOf2(len(buf), 256) {
+ _memset_avx2(p1, p2, p3)
+ } else {
+ _memset_sse4(p1, p2, p3)
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s
new file mode 100644
index 000000000..2a77807cb
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s
@@ -0,0 +1,85 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_memset_avx2(SB), $0-24
+
+ MOVQ buf+0(FP), DI
+ MOVQ len+8(FP), SI
+ MOVQ c+16(FP), DX
+
+ LONG $0x371c8d4c // lea r11, [rdi + rsi]
+ WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+ JBE LBB0_13
+ LONG $0x80fe8148; WORD $0x0000; BYTE $0x00 // cmp rsi, 128
+ JB LBB0_12
+ WORD $0x8949; BYTE $0xf0 // mov r8, rsi
+ LONG $0x80e08349 // and r8, -128
+ WORD $0x8949; BYTE $0xf2 // mov r10, rsi
+ LONG $0x80e28349 // and r10, -128
+ JE LBB0_12
+ LONG $0xc26ef9c5 // vmovd xmm0, edx
+ LONG $0x787de2c4; BYTE $0xc0 // vpbroadcastb ymm0, xmm0
+ LONG $0x804a8d4d // lea r9, [r10 - 128]
+ WORD $0x8944; BYTE $0xc8 // mov eax, r9d
+ WORD $0xe8c1; BYTE $0x07 // shr eax, 7
+ WORD $0xc0ff // inc eax
+ LONG $0x03e08348 // and rax, 3
+ JE LBB0_4
+ WORD $0xf748; BYTE $0xd8 // neg rax
+ WORD $0xc931 // xor ecx, ecx
+
+LBB0_6:
+ LONG $0x047ffec5; BYTE $0x0f // vmovdqu yword [rdi + rcx], ymm0
+ LONG $0x447ffec5; WORD $0x200f // vmovdqu yword [rdi + rcx + 32], ymm0
+ LONG $0x447ffec5; WORD $0x400f // vmovdqu yword [rdi + rcx + 64], ymm0
+ LONG $0x447ffec5; WORD $0x600f // vmovdqu yword [rdi + rcx + 96], ymm0
+ LONG $0x80e98348 // sub rcx, -128
+ WORD $0xff48; BYTE $0xc0 // inc rax
+ JNE LBB0_6
+ JMP LBB0_7
+
+LBB0_4:
+ WORD $0xc931 // xor ecx, ecx
+
+LBB0_7:
+ LONG $0x80f98149; WORD $0x0001; BYTE $0x00 // cmp r9, 384
+ JB LBB0_10
+ WORD $0x894c; BYTE $0xd0 // mov rax, r10
+ WORD $0x2948; BYTE $0xc8 // sub rax, rcx
+ QUAD $0x000001e00f8c8d48 // lea rcx, [rdi + rcx + 480]
+
+LBB0_9:
+ QUAD $0xfffffe20817ffec5 // vmovdqu yword [rcx - 480], ymm0
+ QUAD $0xfffffe40817ffec5 // vmovdqu yword [rcx - 448], ymm0
+ QUAD $0xfffffe60817ffec5 // vmovdqu yword [rcx - 416], ymm0
+ QUAD $0xfffffe80817ffec5 // vmovdqu yword [rcx - 384], ymm0
+ QUAD $0xfffffea0817ffec5 // vmovdqu yword [rcx - 352], ymm0
+ QUAD $0xfffffec0817ffec5 // vmovdqu yword [rcx - 320], ymm0
+ QUAD $0xfffffee0817ffec5 // vmovdqu yword [rcx - 288], ymm0
+ QUAD $0xffffff00817ffec5 // vmovdqu yword [rcx - 256], ymm0
+ QUAD $0xffffff20817ffec5 // vmovdqu yword [rcx - 224], ymm0
+ QUAD $0xffffff40817ffec5 // vmovdqu yword [rcx - 192], ymm0
+ QUAD $0xffffff60817ffec5 // vmovdqu yword [rcx - 160], ymm0
+ LONG $0x417ffec5; BYTE $0x80 // vmovdqu yword [rcx - 128], ymm0
+ LONG $0x417ffec5; BYTE $0xa0 // vmovdqu yword [rcx - 96], ymm0
+ LONG $0x417ffec5; BYTE $0xc0 // vmovdqu yword [rcx - 64], ymm0
+ LONG $0x417ffec5; BYTE $0xe0 // vmovdqu yword [rcx - 32], ymm0
+ LONG $0x017ffec5 // vmovdqu yword [rcx], ymm0
+ LONG $0x00c18148; WORD $0x0002; BYTE $0x00 // add rcx, 512
+ LONG $0xfe000548; WORD $0xffff // add rax, -512
+ JNE LBB0_9
+
+LBB0_10:
+ WORD $0x3949; BYTE $0xf2 // cmp r10, rsi
+ JE LBB0_13
+ WORD $0x014c; BYTE $0xc7 // add rdi, r8
+
+LBB0_12:
+ WORD $0x1788 // mov byte [rdi], dl
+ WORD $0xff48; BYTE $0xc7 // inc rdi
+ WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+ JNE LBB0_12
+
+LBB0_13:
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go
new file mode 100644
index 000000000..9b94d99ff
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build wasm
+
+package memory
+
+func init() {
+ memset = memory_memset_go
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go
new file mode 100644
index 000000000..6cb0400c9
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !noasm
+
+package memory
+
+import "unsafe"
+
+//go:noescape
+func _memset_neon(buf unsafe.Pointer, len, c uintptr)
+
+func memory_memset_neon(buf []byte, c byte) {
+ if len(buf) == 0 {
+ return
+ }
+ _memset_neon(unsafe.Pointer(&buf[0]), uintptr(len(buf)), uintptr(c))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s
new file mode 100644
index 000000000..18655cc7a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s
@@ -0,0 +1,43 @@
+//+build !noasm !appengine
+
+// ARROW-15320:
+// (C2GOASM doesn't work correctly for Arm64)
+// Partly GENERATED BY asm2plan9s.
+
+// func _memset_neon(buf unsafe.Pointer, len, c uintptr)
+TEXT ·_memset_neon(SB), $0-24
+
+ MOVD buf+0(FP), R0
+ MOVD len+8(FP), R1
+ MOVD c+16(FP), R2
+
+ WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]!
+ WORD $0x8b010008 // add x8, x0, x1
+ WORD $0xeb00011f // cmp x8, x0
+ WORD $0x910003fd // mov x29, sp
+ BLS LBB0_7
+
+ WORD $0xf100803f // cmp x1, #32
+ BHS LBB0_3
+ WORD $0xaa0003e9 // mov x9, x0
+ JMP LBB0_6
+LBB0_3:
+ WORD $0x927be82a // and x10, x1, #0xffffffffffffffe0
+ WORD $0x4e010c40 // dup v0.16b, w2
+ WORD $0x8b0a0009 // add x9, x0, x10
+ WORD $0x9100400b // add x11, x0, #16
+ WORD $0xaa0a03ec // mov x12, x10
+LBB0_4:
+ WORD $0xad3f8160 // stp q0, q0, [x11, #-16]
+ WORD $0xf100818c // subs x12, x12, #32
+ WORD $0x9100816b // add x11, x11, #32
+ BNE LBB0_4
+ WORD $0xeb01015f // cmp x10, x1
+ BEQ LBB0_7
+LBB0_6:
+ WORD $0x38001522 // strb w2, [x9], #1
+ WORD $0xeb09011f // cmp x8, x9
+ BNE LBB0_6
+LBB0_7:
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go
new file mode 100644
index 000000000..bf8846fa2
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build noasm
+
+package memory
+
+func init() {
+ memset = memory_memset_go
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go
new file mode 100644
index 000000000..716c0d270
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !noasm
+
+package memory
+
+import "unsafe"
+
+//go:noescape
+func _memset_sse4(buf unsafe.Pointer, len, c uintptr)
+
+func memory_memset_sse4(buf []byte, c byte) {
+ if len(buf) == 0 {
+ return
+ }
+ _memset_sse4(unsafe.Pointer(&buf[0]), uintptr(len(buf)), uintptr(c))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s
new file mode 100644
index 000000000..b1906f99b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s
@@ -0,0 +1,84 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_memset_sse4(SB), $0-24
+
+ MOVQ buf+0(FP), DI
+ MOVQ len+8(FP), SI
+ MOVQ c+16(FP), DX
+
+ LONG $0x371c8d4c // lea r11, [rdi + rsi]
+ WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+ JBE LBB0_13
+ LONG $0x20fe8348 // cmp rsi, 32
+ JB LBB0_12
+ WORD $0x8949; BYTE $0xf0 // mov r8, rsi
+ LONG $0xe0e08349 // and r8, -32
+ WORD $0x8949; BYTE $0xf2 // mov r10, rsi
+ LONG $0xe0e28349 // and r10, -32
+ JE LBB0_12
+ WORD $0xb60f; BYTE $0xc2 // movzx eax, dl
+ LONG $0xc06e0f66 // movd xmm0, eax
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0x00380f66; BYTE $0xc1 // pshufb xmm0, xmm1
+ LONG $0xe04a8d4d // lea r9, [r10 - 32]
+ WORD $0x8944; BYTE $0xc9 // mov ecx, r9d
+ WORD $0xe9c1; BYTE $0x05 // shr ecx, 5
+ WORD $0xc1ff // inc ecx
+ LONG $0x07e18348 // and rcx, 7
+ JE LBB0_4
+ WORD $0xf748; BYTE $0xd9 // neg rcx
+ WORD $0xc031 // xor eax, eax
+
+LBB0_6:
+ LONG $0x047f0ff3; BYTE $0x07 // movdqu oword [rdi + rax], xmm0
+ LONG $0x447f0ff3; WORD $0x1007 // movdqu oword [rdi + rax + 16], xmm0
+ LONG $0x20c08348 // add rax, 32
+ WORD $0xff48; BYTE $0xc1 // inc rcx
+ JNE LBB0_6
+ JMP LBB0_7
+
+LBB0_4:
+ WORD $0xc031 // xor eax, eax
+
+LBB0_7:
+ LONG $0xe0f98149; WORD $0x0000; BYTE $0x00 // cmp r9, 224
+ JB LBB0_10
+ WORD $0x894c; BYTE $0xd1 // mov rcx, r10
+ WORD $0x2948; BYTE $0xc1 // sub rcx, rax
+ QUAD $0x000000f007848d48 // lea rax, [rdi + rax + 240]
+
+LBB0_9:
+ QUAD $0xffffff10807f0ff3 // movdqu oword [rax - 240], xmm0
+ QUAD $0xffffff20807f0ff3 // movdqu oword [rax - 224], xmm0
+ QUAD $0xffffff30807f0ff3 // movdqu oword [rax - 208], xmm0
+ QUAD $0xffffff40807f0ff3 // movdqu oword [rax - 192], xmm0
+ QUAD $0xffffff50807f0ff3 // movdqu oword [rax - 176], xmm0
+ QUAD $0xffffff60807f0ff3 // movdqu oword [rax - 160], xmm0
+ QUAD $0xffffff70807f0ff3 // movdqu oword [rax - 144], xmm0
+ LONG $0x407f0ff3; BYTE $0x80 // movdqu oword [rax - 128], xmm0
+ LONG $0x407f0ff3; BYTE $0x90 // movdqu oword [rax - 112], xmm0
+ LONG $0x407f0ff3; BYTE $0xa0 // movdqu oword [rax - 96], xmm0
+ LONG $0x407f0ff3; BYTE $0xb0 // movdqu oword [rax - 80], xmm0
+ LONG $0x407f0ff3; BYTE $0xc0 // movdqu oword [rax - 64], xmm0
+ LONG $0x407f0ff3; BYTE $0xd0 // movdqu oword [rax - 48], xmm0
+ LONG $0x407f0ff3; BYTE $0xe0 // movdqu oword [rax - 32], xmm0
+ LONG $0x407f0ff3; BYTE $0xf0 // movdqu oword [rax - 16], xmm0
+ LONG $0x007f0ff3 // movdqu oword [rax], xmm0
+ LONG $0x01000548; WORD $0x0000 // add rax, 256
+ LONG $0x00c18148; WORD $0xffff; BYTE $0xff // add rcx, -256
+ JNE LBB0_9
+
+LBB0_10:
+ WORD $0x3949; BYTE $0xf2 // cmp r10, rsi
+ JE LBB0_13
+ WORD $0x014c; BYTE $0xc7 // add rdi, r8
+
+LBB0_12:
+ WORD $0x1788 // mov byte [rdi], dl
+ WORD $0xff48; BYTE $0xc7 // inc rdi
+ WORD $0x3949; BYTE $0xfb // cmp r11, rdi
+ JNE LBB0_12
+
+LBB0_13:
+ RET
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go
new file mode 100644
index 000000000..3b0d3a5cb
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package memory
+
+import "unsafe"
+
+func roundToPowerOf2(v, round int) int {
+ forceCarry := round - 1
+ truncateMask := ^forceCarry
+ return (v + forceCarry) & truncateMask
+}
+
+func roundUpToMultipleOf64(v int) int {
+ return roundToPowerOf2(v, 64)
+}
+
+func isMultipleOfPowerOf2(v int, d int) bool {
+ return (v & (d - 1)) == 0
+}
+
+func addressOf(b []byte) uintptr {
+ return uintptr(unsafe.Pointer(&b[0]))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json
new file mode 100644
index 000000000..7fa2800a5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json
@@ -0,0 +1,15 @@
+{
+ "title": "templates",
+ "type": "array",
+ "items": {
+ "title": "template",
+ "type": "object",
+ "properties": {
+ "Name": {
+ "type": "string",
+ "description": "The name of the template type"
+ }
+ },
+ "required": ["Name"]
+ }
+} \ No newline at end of file
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata
new file mode 100644
index 000000000..3c2d63b7c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata
@@ -0,0 +1,135 @@
+[
+ {
+ "Name": "Int64",
+ "name": "int64",
+ "Type": "int64",
+ "Default": "0",
+ "Size": "8",
+ "Opt": {
+ "BufferBuilder": true
+ }
+ },
+ {
+ "Name": "Uint64",
+ "name": "uint64",
+ "Type": "uint64",
+ "Default": "0",
+ "Size": "8"
+ },
+ {
+ "Name": "Float64",
+ "name": "float64",
+ "Type": "float64",
+ "Default": "0",
+ "Size": "8"
+ },
+ {
+ "Name": "Int32",
+ "name": "int32",
+ "Type": "int32",
+ "Default": "0",
+ "Size": "4",
+ "Opt": {
+ "BufferBuilder": true
+ }
+ },
+ {
+ "Name": "Uint32",
+ "name": "uint32",
+ "Type": "uint32",
+ "Default": "0",
+ "Size": "4"
+ },
+ {
+ "Name": "Float32",
+ "name": "float32",
+ "Type": "float32",
+ "Default": "0",
+ "Size": "4"
+ },
+ {
+ "Name": "Int16",
+ "name": "int16",
+ "Type": "int16",
+ "Default": "0",
+ "Size": "2"
+ },
+ {
+ "Name": "Uint16",
+ "name": "uint16",
+ "Type": "uint16",
+ "Default": "0",
+ "Size": "2"
+ },
+ {
+ "Name": "Int8",
+ "name": "int8",
+ "Type": "int8",
+ "Default": "0",
+ "Size": "1",
+ "Opt": {
+ "BufferBuilder": true
+ }
+ },
+ {
+ "Name": "Uint8",
+ "name": "uint8",
+ "Type": "uint8",
+ "Default": "0",
+ "Size": "1"
+ },
+ {
+ "Name": "Time32",
+ "name": "time32",
+ "Type": "Time32",
+ "QualifiedType": "arrow.Time32",
+ "InternalType": "int32",
+ "Default": "0",
+ "Size": "4",
+ "Opt": {
+ "Parametric": true
+ }
+ },
+ {
+ "Name": "Time64",
+ "name": "time64",
+ "Type": "Time64",
+ "QualifiedType": "arrow.Time64",
+ "InternalType": "int64",
+ "Default": "0",
+ "Size": "8",
+ "Opt": {
+ "Parametric": true
+ }
+ },
+ {
+ "Name": "Date32",
+ "name": "date32",
+ "Type": "Date32",
+ "QualifiedType": "arrow.Date32",
+ "InternalType": "int32",
+ "Default": "0",
+ "Size": "4"
+ },
+ {
+ "Name": "Date64",
+ "name": "date64",
+ "Type": "Date64",
+ "QualifiedType": "arrow.Date64",
+ "InternalType": "int64",
+ "Default": "0",
+ "Size": "8"
+ },
+ {
+ "Name": "Duration",
+ "name": "duration",
+ "Type": "Duration",
+ "QualifiedType": "arrow.Duration",
+ "InternalType": "int64",
+ "Default": "0",
+ "Size": "8",
+ "Opt": {
+ "Parametric": true
+ }
+ }
+]
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/record.go b/vendor/github.com/apache/arrow/go/v14/arrow/record.go
new file mode 100644
index 000000000..d98c7732e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/record.go
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import "github.com/apache/arrow/go/v14/internal/json"
+
+// Record is a collection of equal-length arrays matching a particular Schema.
+// Also known as a RecordBatch in the spec and in some implementations.
+//
+// It is also possible to construct a Table from a collection of Records that
+// all have the same schema.
+type Record interface {
+ json.Marshaler
+
+ Release()
+ Retain()
+
+ Schema() *Schema
+
+ NumRows() int64
+ NumCols() int64
+
+ Columns() []Array
+ Column(i int) Array
+ ColumnName(i int) string
+ SetColumn(i int, col Array) (Record, error)
+
+ // NewSlice constructs a zero-copy slice of the record with the indicated
+ // indices i and j, corresponding to array[i:j].
+ // The returned record must be Release()'d after use.
+ //
+ // NewSlice panics if the slice is outside the valid range of the record array.
+ // NewSlice panics if j < i.
+ NewSlice(i, j int64) Record
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/schema.go b/vendor/github.com/apache/arrow/go/v14/arrow/schema.go
new file mode 100644
index 000000000..a7fa43413
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/schema.go
@@ -0,0 +1,301 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+)
+
+type Metadata struct {
+ keys []string
+ values []string
+}
+
+func NewMetadata(keys, values []string) Metadata {
+ if len(keys) != len(values) {
+ panic("arrow: len mismatch")
+ }
+
+ n := len(keys)
+ if n == 0 {
+ return Metadata{}
+ }
+
+ md := Metadata{
+ keys: make([]string, n),
+ values: make([]string, n),
+ }
+ copy(md.keys, keys)
+ copy(md.values, values)
+ return md
+}
+
+func MetadataFrom(kv map[string]string) Metadata {
+ md := Metadata{
+ keys: make([]string, 0, len(kv)),
+ values: make([]string, 0, len(kv)),
+ }
+ for k := range kv {
+ md.keys = append(md.keys, k)
+ }
+ sort.Strings(md.keys)
+ for _, k := range md.keys {
+ md.values = append(md.values, kv[k])
+ }
+ return md
+}
+
+func (md Metadata) Len() int { return len(md.keys) }
+func (md Metadata) Keys() []string { return md.keys }
+func (md Metadata) Values() []string { return md.values }
+func (md Metadata) ToMap() map[string]string {
+ m := make(map[string]string, len(md.keys))
+ for i := range md.keys {
+ m[md.keys[i]] = md.values[i]
+ }
+ return m
+}
+
+func (md Metadata) String() string {
+ o := new(strings.Builder)
+ fmt.Fprintf(o, "[")
+ for i := range md.keys {
+ if i > 0 {
+ fmt.Fprintf(o, ", ")
+ }
+ fmt.Fprintf(o, "%q: %q", md.keys[i], md.values[i])
+ }
+ fmt.Fprintf(o, "]")
+ return o.String()
+}
+
+// FindKey returns the index of the key-value pair with the provided key name,
+// or -1 if such a key does not exist.
+func (md Metadata) FindKey(k string) int {
+ for i, v := range md.keys {
+ if v == k {
+ return i
+ }
+ }
+ return -1
+}
+
+// GetValue returns the value associated with the provided key name.
+// If the key does not exist, the second return value is false.
+func (md Metadata) GetValue(k string) (string, bool) {
+ i := md.FindKey(k)
+ if i < 0 {
+ return "", false
+ }
+ return md.values[i], true
+}
+
+func (md Metadata) clone() Metadata {
+ if len(md.keys) == 0 {
+ return Metadata{}
+ }
+
+ o := Metadata{
+ keys: make([]string, len(md.keys)),
+ values: make([]string, len(md.values)),
+ }
+ copy(o.keys, md.keys)
+ copy(o.values, md.values)
+
+ return o
+}
+
+func (md Metadata) sortedIndices() []int {
+ idxes := make([]int, len(md.keys))
+ for i := range idxes {
+ idxes[i] = i
+ }
+
+ sort.Slice(idxes, func(i, j int) bool {
+ return md.keys[idxes[i]] < md.keys[idxes[j]]
+ })
+ return idxes
+}
+
+func (md Metadata) Equal(rhs Metadata) bool {
+ if md.Len() != rhs.Len() {
+ return false
+ }
+
+ idxes := md.sortedIndices()
+ rhsIdxes := rhs.sortedIndices()
+ for i := range idxes {
+ j := idxes[i]
+ k := rhsIdxes[i]
+ if md.keys[j] != rhs.keys[k] || md.values[j] != rhs.values[k] {
+ return false
+ }
+ }
+ return true
+}
+
+// Schema is a sequence of Field values, describing the columns of a table or
+// a record batch.
+type Schema struct {
+ fields []Field
+ index map[string][]int
+ meta Metadata
+ endianness endian.Endianness
+}
+
+// NewSchema returns a new Schema value from the slice of fields and metadata.
+//
+// NewSchema panics if there is a field with an invalid DataType.
+func NewSchema(fields []Field, metadata *Metadata) *Schema {
+ return NewSchemaWithEndian(fields, metadata, endian.NativeEndian)
+}
+
+func NewSchemaWithEndian(fields []Field, metadata *Metadata, e endian.Endianness) *Schema {
+ sc := &Schema{
+ fields: make([]Field, 0, len(fields)),
+ index: make(map[string][]int, len(fields)),
+ endianness: e,
+ }
+ if metadata != nil {
+ sc.meta = metadata.clone()
+ }
+ for i, field := range fields {
+ if field.Type == nil {
+ panic("arrow: field with nil DataType")
+ }
+ sc.fields = append(sc.fields, field)
+ sc.index[field.Name] = append(sc.index[field.Name], i)
+ }
+ return sc
+}
+
+func (sc *Schema) WithEndianness(e endian.Endianness) *Schema {
+ return NewSchemaWithEndian(sc.fields, &sc.meta, e)
+}
+
+func (sc *Schema) Endianness() endian.Endianness { return sc.endianness }
+func (sc *Schema) IsNativeEndian() bool { return sc.endianness == endian.NativeEndian }
+func (sc *Schema) Metadata() Metadata { return sc.meta }
+func (sc *Schema) Fields() []Field {
+ fields := make([]Field, len(sc.fields))
+ copy(fields, sc.fields)
+ return fields
+}
+func (sc *Schema) Field(i int) Field { return sc.fields[i] }
+func (sc *Schema) NumFields() int { return len(sc.fields) }
+
+func (sc *Schema) FieldsByName(n string) ([]Field, bool) {
+ indices, ok := sc.index[n]
+ if !ok {
+ return nil, ok
+ }
+ fields := make([]Field, 0, len(indices))
+ for _, v := range indices {
+ fields = append(fields, sc.fields[v])
+ }
+ return fields, ok
+}
+
+// FieldIndices returns the indices of the named field or nil.
+func (sc *Schema) FieldIndices(n string) []int {
+ return sc.index[n]
+}
+
+func (sc *Schema) HasField(n string) bool { return len(sc.FieldIndices(n)) > 0 }
+func (sc *Schema) HasMetadata() bool { return len(sc.meta.keys) > 0 }
+
+// Equal returns whether two schema are equal.
+// Equal does not compare the metadata.
+func (sc *Schema) Equal(o *Schema) bool {
+ switch {
+ case sc == o:
+ return true
+ case sc == nil || o == nil:
+ return false
+ case len(sc.fields) != len(o.fields):
+ return false
+ case sc.endianness != o.endianness:
+ return false
+ }
+
+ for i := range sc.fields {
+ if !sc.fields[i].Equal(o.fields[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// AddField adds a field at the given index and return a new schema.
+func (s *Schema) AddField(i int, field Field) (*Schema, error) {
+ if i < 0 || i > len(s.fields) {
+ return nil, fmt.Errorf("arrow: invalid field index %d", i)
+ }
+
+ fields := make([]Field, len(s.fields)+1)
+ copy(fields[:i], s.fields[:i])
+ fields[i] = field
+ copy(fields[i+1:], s.fields[i:])
+ return NewSchema(fields, &s.meta), nil
+}
+
+func (s *Schema) String() string {
+ o := new(strings.Builder)
+ fmt.Fprintf(o, "schema:\n fields: %d\n", len(s.Fields()))
+ for i, f := range s.Fields() {
+ if i > 0 {
+ o.WriteString("\n")
+ }
+ fmt.Fprintf(o, " - %v", f)
+ }
+ if s.endianness != endian.NativeEndian {
+ fmt.Fprintf(o, "\n endianness: %v", s.endianness)
+ }
+ if meta := s.Metadata(); meta.Len() > 0 {
+ fmt.Fprintf(o, "\n metadata: %v", meta)
+ }
+ return o.String()
+}
+
+func (s *Schema) Fingerprint() string {
+ if s == nil {
+ return ""
+ }
+
+ var b strings.Builder
+ b.WriteString("S{")
+ for _, f := range s.Fields() {
+ fieldFingerprint := f.Fingerprint()
+ if fieldFingerprint == "" {
+ return ""
+ }
+
+ b.WriteString(fieldFingerprint)
+ b.WriteByte(';')
+ }
+ if s.endianness == endian.LittleEndian {
+ b.WriteByte('L')
+ } else {
+ b.WriteByte('B')
+ }
+ b.WriteByte('}')
+ return b.String()
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/table.go b/vendor/github.com/apache/arrow/go/v14/arrow/table.go
new file mode 100644
index 000000000..5a68085f8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/table.go
@@ -0,0 +1,193 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+// Table represents a logical sequence of chunked arrays of equal length. It is
+// similar to a Record except that the columns are ChunkedArrays instead,
+// allowing for a Table to be built up by chunks progressively whereas the columns
+// in a single Record are always each a single contiguous array.
+type Table interface {
+ Schema() *Schema
+ NumRows() int64
+ NumCols() int64
+ Column(i int) *Column
+
+ // AddColumn adds a new column to the table and a corresponding field (of the same type)
+ // to its schema, at the specified position. Returns the new table with updated columns and schema.
+ AddColumn(pos int, f Field, c Column) (Table, error)
+
+ Retain()
+ Release()
+}
+
+// Column is an immutable column data structure consisting of
+// a field (type metadata) and a chunked data array.
+//
+// To get strongly typed data from a Column, you need to iterate the
+// chunks and type assert each individual Array. For example:
+//
+// switch column.DataType().ID {
+// case arrow.INT32:
+// for _, c := range column.Data().Chunks() {
+// arr := c.(*array.Int32)
+// // do something with arr
+// }
+// case arrow.INT64:
+// for _, c := range column.Data().Chunks() {
+// arr := c.(*array.Int64)
+// // do something with arr
+// }
+// case ...
+// }
+type Column struct {
+ field Field
+ data *Chunked
+}
+
+// NewColumnFromArr is a convenience function to create a column from
+// a field and a non-chunked array.
+//
+// This provides a simple mechanism for bypassing the middle step of
+// constructing a Chunked array of one and then releasing it because
+// of the ref counting.
+func NewColumnFromArr(field Field, arr Array) Column {
+ if !TypeEqual(field.Type, arr.DataType()) {
+ panic(fmt.Errorf("%w: arrow/array: inconsistent data type %s vs %s", ErrInvalid, field.Type, arr.DataType()))
+ }
+
+ arr.Retain()
+ return Column{
+ field: field,
+ data: &Chunked{
+ refCount: 1,
+ chunks: []Array{arr},
+ length: arr.Len(),
+ nulls: arr.NullN(),
+ dtype: field.Type,
+ },
+ }
+}
+
+// NewColumn returns a column from a field and a chunked data array.
+//
+// NewColumn panics if the field's data type is inconsistent with the data type
+// of the chunked data array.
+func NewColumn(field Field, chunks *Chunked) *Column {
+ col := Column{
+ field: field,
+ data: chunks,
+ }
+ col.data.Retain()
+
+ if !TypeEqual(col.data.DataType(), col.field.Type) {
+ col.data.Release()
+ panic(fmt.Errorf("%w: arrow/array: inconsistent data type %s vs %s", ErrInvalid, col.data.DataType(), col.field.Type))
+ }
+
+ return &col
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (col *Column) Retain() {
+ col.data.Retain()
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (col *Column) Release() {
+ col.data.Release()
+}
+
+func (col *Column) Len() int { return col.data.Len() }
+func (col *Column) NullN() int { return col.data.NullN() }
+func (col *Column) Data() *Chunked { return col.data }
+func (col *Column) Field() Field { return col.field }
+func (col *Column) Name() string { return col.field.Name }
+func (col *Column) DataType() DataType { return col.field.Type }
+
+// Chunked manages a collection of primitives arrays as one logical large array.
+type Chunked struct {
+ refCount int64 // refCount must be first in the struct for 64 bit alignment and sync/atomic (https://github.com/golang/go/issues/37262)
+
+ chunks []Array
+
+ length int
+ nulls int
+ dtype DataType
+}
+
+// NewChunked returns a new chunked array from the slice of arrays.
+//
+// NewChunked panics if the chunks do not have the same data type.
+func NewChunked(dtype DataType, chunks []Array) *Chunked {
+ arr := &Chunked{
+ chunks: make([]Array, 0, len(chunks)),
+ refCount: 1,
+ dtype: dtype,
+ }
+ for _, chunk := range chunks {
+ if chunk == nil {
+ continue
+ }
+
+ if !TypeEqual(chunk.DataType(), dtype) {
+ panic(fmt.Errorf("%w: arrow/array: mismatch data type %s vs %s", ErrInvalid, chunk.DataType().String(), dtype.String()))
+ }
+ chunk.Retain()
+ arr.chunks = append(arr.chunks, chunk)
+ arr.length += chunk.Len()
+ arr.nulls += chunk.NullN()
+ }
+ return arr
+}
+
+// Retain increases the reference count by 1.
+// Retain may be called simultaneously from multiple goroutines.
+func (a *Chunked) Retain() {
+ atomic.AddInt64(&a.refCount, 1)
+}
+
+// Release decreases the reference count by 1.
+// When the reference count goes to zero, the memory is freed.
+// Release may be called simultaneously from multiple goroutines.
+func (a *Chunked) Release() {
+ debug.Assert(atomic.LoadInt64(&a.refCount) > 0, "too many releases")
+
+ if atomic.AddInt64(&a.refCount, -1) == 0 {
+ for _, arr := range a.chunks {
+ arr.Release()
+ }
+ a.chunks = nil
+ a.length = 0
+ a.nulls = 0
+ }
+}
+
+func (a *Chunked) Len() int { return a.length }
+func (a *Chunked) NullN() int { return a.nulls }
+func (a *Chunked) DataType() DataType { return a.dtype }
+func (a *Chunked) Chunks() []Array { return a.chunks }
+func (a *Chunked) Chunk(i int) Array { return a.chunks[i] }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/tools.go b/vendor/github.com/apache/arrow/go/v14/arrow/tools.go
new file mode 100644
index 000000000..37b6dde36
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/tools.go
@@ -0,0 +1,25 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build tools
+// +build tools
+
+package tools
+
+import (
+ _ "golang.org/x/tools/cmd/goimports"
+ _ "golang.org/x/tools/cmd/stringer"
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go
new file mode 100644
index 000000000..ee3ccb7ef
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go
@@ -0,0 +1,65 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package arrow
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[NULL-0]
+ _ = x[BOOL-1]
+ _ = x[UINT8-2]
+ _ = x[INT8-3]
+ _ = x[UINT16-4]
+ _ = x[INT16-5]
+ _ = x[UINT32-6]
+ _ = x[INT32-7]
+ _ = x[UINT64-8]
+ _ = x[INT64-9]
+ _ = x[FLOAT16-10]
+ _ = x[FLOAT32-11]
+ _ = x[FLOAT64-12]
+ _ = x[STRING-13]
+ _ = x[BINARY-14]
+ _ = x[FIXED_SIZE_BINARY-15]
+ _ = x[DATE32-16]
+ _ = x[DATE64-17]
+ _ = x[TIMESTAMP-18]
+ _ = x[TIME32-19]
+ _ = x[TIME64-20]
+ _ = x[INTERVAL_MONTHS-21]
+ _ = x[INTERVAL_DAY_TIME-22]
+ _ = x[DECIMAL128-23]
+ _ = x[DECIMAL256-24]
+ _ = x[LIST-25]
+ _ = x[STRUCT-26]
+ _ = x[SPARSE_UNION-27]
+ _ = x[DENSE_UNION-28]
+ _ = x[DICTIONARY-29]
+ _ = x[MAP-30]
+ _ = x[EXTENSION-31]
+ _ = x[FIXED_SIZE_LIST-32]
+ _ = x[DURATION-33]
+ _ = x[LARGE_STRING-34]
+ _ = x[LARGE_BINARY-35]
+ _ = x[LARGE_LIST-36]
+ _ = x[INTERVAL_MONTH_DAY_NANO-37]
+ _ = x[RUN_END_ENCODED-38]
+ _ = x[STRING_VIEW-39]
+ _ = x[BINARY_VIEW-40]
+ _ = x[LIST_VIEW-41]
+ _ = x[LARGE_LIST_VIEW-42]
+}
+
+const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVAL_MONTHSINTERVAL_DAY_TIMEDECIMAL128DECIMAL256LISTSTRUCTSPARSE_UNIONDENSE_UNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATIONLARGE_STRINGLARGE_BINARYLARGE_LISTINTERVAL_MONTH_DAY_NANORUN_END_ENCODEDSTRING_VIEWBINARY_VIEWLIST_VIEWLARGE_LIST_VIEW"
+
+var _Type_index = [...]uint16{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 148, 165, 175, 185, 189, 195, 207, 218, 228, 231, 240, 255, 263, 275, 287, 297, 320, 335, 346, 357, 366, 381}
+
+func (i Type) String() string {
+ if i < 0 || i >= Type(len(_Type_index)-1) {
+ return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go
new file mode 100644
index 000000000..6a46bdec7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go
@@ -0,0 +1,28 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+)
+
+type booleanTraits struct{}
+
+var BooleanTraits booleanTraits
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (booleanTraits) BytesRequired(n int) int { return bitutil.CeilByte(n) / 8 }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go
new file mode 100644
index 000000000..d2d3aae37
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go
@@ -0,0 +1,63 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/decimal128"
+ "github.com/apache/arrow/go/v14/arrow/endian"
+)
+
+// Decimal128 traits
+var Decimal128Traits decimal128Traits
+
+const (
+ // Decimal128SizeBytes specifies the number of bytes required to store a single decimal128 in memory
+ Decimal128SizeBytes = int(unsafe.Sizeof(decimal128.Num{}))
+)
+
+type decimal128Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (decimal128Traits) BytesRequired(n int) int { return Decimal128SizeBytes * n }
+
+// PutValue
+func (decimal128Traits) PutValue(b []byte, v decimal128.Num) {
+ endian.Native.PutUint64(b[:8], uint64(v.LowBits()))
+ endian.Native.PutUint64(b[8:], uint64(v.HighBits()))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type decimal128.Num.
+//
+// NOTE: len(b) must be a multiple of Decimal128SizeBytes.
+func (decimal128Traits) CastFromBytes(b []byte) []decimal128.Num {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*decimal128.Num)(unsafe.Pointer(h.Data)), cap(b)/Decimal128SizeBytes)[:len(b)/Decimal128SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (decimal128Traits) CastToBytes(b []decimal128.Num) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Decimal128SizeBytes)[:len(b)*Decimal128SizeBytes]
+}
+
+// Copy copies src to dst.
+func (decimal128Traits) Copy(dst, src []decimal128.Num) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go
new file mode 100644
index 000000000..256ed68ff
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go
@@ -0,0 +1,58 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/decimal256"
+ "github.com/apache/arrow/go/v14/arrow/endian"
+)
+
+// Decimal256 traits
+var Decimal256Traits decimal256Traits
+
+const (
+ Decimal256SizeBytes = int(unsafe.Sizeof(decimal256.Num{}))
+)
+
+type decimal256Traits struct{}
+
+func (decimal256Traits) BytesRequired(n int) int { return Decimal256SizeBytes * n }
+
+func (decimal256Traits) PutValue(b []byte, v decimal256.Num) {
+ for i, a := range v.Array() {
+ start := i * 8
+ endian.Native.PutUint64(b[start:], a)
+ }
+}
+
+// CastFromBytes reinterprets the slice b to a slice of decimal256
+func (decimal256Traits) CastFromBytes(b []byte) []decimal256.Num {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*decimal256.Num)(unsafe.Pointer(h.Data)), cap(b)/Decimal256SizeBytes)[:len(b)/Decimal256SizeBytes]
+}
+
+func (decimal256Traits) CastToBytes(b []decimal256.Num) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Decimal256SizeBytes)[:len(b)*Decimal256SizeBytes]
+}
+
+func (decimal256Traits) Copy(dst, src []decimal256.Num) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go
new file mode 100644
index 000000000..c40363d37
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go
@@ -0,0 +1,62 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+ "github.com/apache/arrow/go/v14/arrow/float16"
+)
+
+// Float16 traits
+var Float16Traits float16Traits
+
+const (
+ // Float16SizeBytes specifies the number of bytes required to store a single float16 in memory
+ Float16SizeBytes = int(unsafe.Sizeof(uint16(0)))
+)
+
+type float16Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (float16Traits) BytesRequired(n int) int { return Float16SizeBytes * n }
+
+// PutValue
+func (float16Traits) PutValue(b []byte, v float16.Num) {
+ endian.Native.PutUint16(b, uint16(v.Uint16()))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type float16.Num.
+//
+// NOTE: len(b) must be a multiple of Float16SizeBytes.
+func (float16Traits) CastFromBytes(b []byte) []float16.Num {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*float16.Num)(unsafe.Pointer(h.Data)), cap(b)/Float16SizeBytes)[:len(b)/Float16SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (float16Traits) CastToBytes(b []float16.Num) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float16SizeBytes)[:len(b)*Float16SizeBytes]
+}
+
+// Copy copies src to dst.
+func (float16Traits) Copy(dst, src []float16.Num) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go
new file mode 100644
index 000000000..35e605709
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go
@@ -0,0 +1,148 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+ "github.com/apache/arrow/go/v14/arrow/internal/debug"
+)
+
+var (
+ MonthIntervalTraits monthTraits
+ DayTimeIntervalTraits daytimeTraits
+ MonthDayNanoIntervalTraits monthDayNanoTraits
+)
+
+func init() {
+ debug.Assert(MonthIntervalSizeBytes == 4, "MonthIntervalSizeBytes should be 4")
+ debug.Assert(DayTimeIntervalSizeBytes == 8, "DayTimeIntervalSizeBytes should be 8")
+ debug.Assert(MonthDayNanoIntervalSizeBytes == 16, "MonthDayNanoIntervalSizeBytes should be 16")
+}
+
+// MonthInterval traits
+
+const (
+ // MonthIntervalSizeBytes specifies the number of bytes required to store a single MonthInterval in memory
+ MonthIntervalSizeBytes = int(unsafe.Sizeof(MonthInterval(0)))
+)
+
+type monthTraits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (monthTraits) BytesRequired(n int) int { return MonthIntervalSizeBytes * n }
+
+// PutValue
+func (monthTraits) PutValue(b []byte, v MonthInterval) {
+ endian.Native.PutUint32(b, uint32(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type MonthInterval.
+//
+// NOTE: len(b) must be a multiple of MonthIntervalSizeBytes.
+func (monthTraits) CastFromBytes(b []byte) []MonthInterval {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*MonthInterval)(unsafe.Pointer(h.Data)), cap(b)/MonthIntervalSizeBytes)[:len(b)/MonthIntervalSizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (monthTraits) CastToBytes(b []MonthInterval) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*MonthIntervalSizeBytes)[:len(b)*MonthIntervalSizeBytes]
+}
+
+// Copy copies src to dst.
+func (monthTraits) Copy(dst, src []MonthInterval) { copy(dst, src) }
+
+// DayTimeInterval traits
+
+const (
+ // DayTimeIntervalSizeBytes specifies the number of bytes required to store a single DayTimeInterval in memory
+ DayTimeIntervalSizeBytes = int(unsafe.Sizeof(DayTimeInterval{}))
+)
+
+type daytimeTraits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (daytimeTraits) BytesRequired(n int) int { return DayTimeIntervalSizeBytes * n }
+
+// PutValue
+func (daytimeTraits) PutValue(b []byte, v DayTimeInterval) {
+ endian.Native.PutUint32(b[0:4], uint32(v.Days))
+ endian.Native.PutUint32(b[4:8], uint32(v.Milliseconds))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type DayTimeInterval.
+//
+// NOTE: len(b) must be a multiple of DayTimeIntervalSizeBytes.
+func (daytimeTraits) CastFromBytes(b []byte) []DayTimeInterval {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*DayTimeInterval)(unsafe.Pointer(h.Data)), cap(b)/DayTimeIntervalSizeBytes)[:len(b)/DayTimeIntervalSizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (daytimeTraits) CastToBytes(b []DayTimeInterval) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*DayTimeIntervalSizeBytes)[:len(b)*DayTimeIntervalSizeBytes]
+}
+
+// Copy copies src to dst.
+func (daytimeTraits) Copy(dst, src []DayTimeInterval) { copy(dst, src) }
+
+// MonthDayNanoInterval traits
+
+const (
+	// MonthDayNanoIntervalSizeBytes specifies the number of bytes required to store a single MonthDayNanoInterval in memory
+ MonthDayNanoIntervalSizeBytes = int(unsafe.Sizeof(MonthDayNanoInterval{}))
+)
+
+type monthDayNanoTraits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (monthDayNanoTraits) BytesRequired(n int) int { return MonthDayNanoIntervalSizeBytes * n }
+
+// PutValue
+func (monthDayNanoTraits) PutValue(b []byte, v MonthDayNanoInterval) {
+ endian.Native.PutUint32(b[0:4], uint32(v.Months))
+ endian.Native.PutUint32(b[4:8], uint32(v.Days))
+ endian.Native.PutUint64(b[8:], uint64(v.Nanoseconds))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type MonthDayNanoInterval.
+//
+// NOTE: len(b) must be a multiple of MonthDayNanoIntervalSizeBytes.
+func (monthDayNanoTraits) CastFromBytes(b []byte) []MonthDayNanoInterval {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*MonthDayNanoInterval)(unsafe.Pointer(h.Data)), cap(b)/MonthDayNanoIntervalSizeBytes)[:len(b)/MonthDayNanoIntervalSizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (monthDayNanoTraits) CastToBytes(b []MonthDayNanoInterval) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*MonthDayNanoIntervalSizeBytes)[:len(b)*MonthDayNanoIntervalSizeBytes]
+}
+
+// Copy copies src to dst.
+func (monthDayNanoTraits) Copy(dst, src []MonthDayNanoInterval) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go
new file mode 100644
index 000000000..6edd75291
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go
@@ -0,0 +1,585 @@
+// Code generated by type_traits_numeric.gen.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "math"
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+)
+
+var (
+ Int64Traits int64Traits
+ Uint64Traits uint64Traits
+ Float64Traits float64Traits
+ Int32Traits int32Traits
+ Uint32Traits uint32Traits
+ Float32Traits float32Traits
+ Int16Traits int16Traits
+ Uint16Traits uint16Traits
+ Int8Traits int8Traits
+ Uint8Traits uint8Traits
+ Time32Traits time32Traits
+ Time64Traits time64Traits
+ Date32Traits date32Traits
+ Date64Traits date64Traits
+ DurationTraits durationTraits
+)
+
+// Int64 traits
+
+const (
+ // Int64SizeBytes specifies the number of bytes required to store a single int64 in memory
+ Int64SizeBytes = int(unsafe.Sizeof(int64(0)))
+)
+
+type int64Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (int64Traits) BytesRequired(n int) int { return Int64SizeBytes * n }
+
+// PutValue
+func (int64Traits) PutValue(b []byte, v int64) {
+ endian.Native.PutUint64(b, uint64(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type int64.
+//
+// NOTE: len(b) must be a multiple of Int64SizeBytes.
+func (int64Traits) CastFromBytes(b []byte) []int64 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*int64)(unsafe.Pointer(h.Data)), cap(b)/Int64SizeBytes)[:len(b)/Int64SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (int64Traits) CastToBytes(b []int64) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int64SizeBytes)[:len(b)*Int64SizeBytes]
+}
+
+// Copy copies src to dst.
+func (int64Traits) Copy(dst, src []int64) { copy(dst, src) }
+
+// Uint64 traits
+
+const (
+ // Uint64SizeBytes specifies the number of bytes required to store a single uint64 in memory
+ Uint64SizeBytes = int(unsafe.Sizeof(uint64(0)))
+)
+
+type uint64Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (uint64Traits) BytesRequired(n int) int { return Uint64SizeBytes * n }
+
+// PutValue
+func (uint64Traits) PutValue(b []byte, v uint64) {
+ endian.Native.PutUint64(b, uint64(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type uint64.
+//
+// NOTE: len(b) must be a multiple of Uint64SizeBytes.
+func (uint64Traits) CastFromBytes(b []byte) []uint64 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*uint64)(unsafe.Pointer(h.Data)), cap(b)/Uint64SizeBytes)[:len(b)/Uint64SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (uint64Traits) CastToBytes(b []uint64) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint64SizeBytes)[:len(b)*Uint64SizeBytes]
+}
+
+// Copy copies src to dst.
+func (uint64Traits) Copy(dst, src []uint64) { copy(dst, src) }
+
+// Float64 traits
+
+const (
+ // Float64SizeBytes specifies the number of bytes required to store a single float64 in memory
+ Float64SizeBytes = int(unsafe.Sizeof(float64(0)))
+)
+
+type float64Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (float64Traits) BytesRequired(n int) int { return Float64SizeBytes * n }
+
+// PutValue
+func (float64Traits) PutValue(b []byte, v float64) {
+ endian.Native.PutUint64(b, math.Float64bits(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type float64.
+//
+// NOTE: len(b) must be a multiple of Float64SizeBytes.
+func (float64Traits) CastFromBytes(b []byte) []float64 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*float64)(unsafe.Pointer(h.Data)), cap(b)/Float64SizeBytes)[:len(b)/Float64SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (float64Traits) CastToBytes(b []float64) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float64SizeBytes)[:len(b)*Float64SizeBytes]
+}
+
+// Copy copies src to dst.
+func (float64Traits) Copy(dst, src []float64) { copy(dst, src) }
+
+// Int32 traits
+
+const (
+ // Int32SizeBytes specifies the number of bytes required to store a single int32 in memory
+ Int32SizeBytes = int(unsafe.Sizeof(int32(0)))
+)
+
+type int32Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (int32Traits) BytesRequired(n int) int { return Int32SizeBytes * n }
+
+// PutValue
+func (int32Traits) PutValue(b []byte, v int32) {
+ endian.Native.PutUint32(b, uint32(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type int32.
+//
+// NOTE: len(b) must be a multiple of Int32SizeBytes.
+func (int32Traits) CastFromBytes(b []byte) []int32 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*int32)(unsafe.Pointer(h.Data)), cap(b)/Int32SizeBytes)[:len(b)/Int32SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (int32Traits) CastToBytes(b []int32) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int32SizeBytes)[:len(b)*Int32SizeBytes]
+}
+
+// Copy copies src to dst.
+func (int32Traits) Copy(dst, src []int32) { copy(dst, src) }
+
+// Uint32 traits
+
+const (
+ // Uint32SizeBytes specifies the number of bytes required to store a single uint32 in memory
+ Uint32SizeBytes = int(unsafe.Sizeof(uint32(0)))
+)
+
+type uint32Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (uint32Traits) BytesRequired(n int) int { return Uint32SizeBytes * n }
+
+// PutValue
+func (uint32Traits) PutValue(b []byte, v uint32) {
+ endian.Native.PutUint32(b, uint32(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type uint32.
+//
+// NOTE: len(b) must be a multiple of Uint32SizeBytes.
+func (uint32Traits) CastFromBytes(b []byte) []uint32 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*uint32)(unsafe.Pointer(h.Data)), cap(b)/Uint32SizeBytes)[:len(b)/Uint32SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (uint32Traits) CastToBytes(b []uint32) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint32SizeBytes)[:len(b)*Uint32SizeBytes]
+}
+
+// Copy copies src to dst.
+func (uint32Traits) Copy(dst, src []uint32) { copy(dst, src) }
+
+// Float32 traits
+
+const (
+ // Float32SizeBytes specifies the number of bytes required to store a single float32 in memory
+ Float32SizeBytes = int(unsafe.Sizeof(float32(0)))
+)
+
+type float32Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (float32Traits) BytesRequired(n int) int { return Float32SizeBytes * n }
+
+// PutValue
+func (float32Traits) PutValue(b []byte, v float32) {
+ endian.Native.PutUint32(b, math.Float32bits(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type float32.
+//
+// NOTE: len(b) must be a multiple of Float32SizeBytes.
+func (float32Traits) CastFromBytes(b []byte) []float32 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*float32)(unsafe.Pointer(h.Data)), cap(b)/Float32SizeBytes)[:len(b)/Float32SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (float32Traits) CastToBytes(b []float32) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float32SizeBytes)[:len(b)*Float32SizeBytes]
+}
+
+// Copy copies src to dst.
+func (float32Traits) Copy(dst, src []float32) { copy(dst, src) }
+
+// Int16 traits
+
+const (
+ // Int16SizeBytes specifies the number of bytes required to store a single int16 in memory
+ Int16SizeBytes = int(unsafe.Sizeof(int16(0)))
+)
+
+type int16Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (int16Traits) BytesRequired(n int) int { return Int16SizeBytes * n }
+
+// PutValue
+func (int16Traits) PutValue(b []byte, v int16) {
+ endian.Native.PutUint16(b, uint16(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type int16.
+//
+// NOTE: len(b) must be a multiple of Int16SizeBytes.
+func (int16Traits) CastFromBytes(b []byte) []int16 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*int16)(unsafe.Pointer(h.Data)), cap(b)/Int16SizeBytes)[:len(b)/Int16SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (int16Traits) CastToBytes(b []int16) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int16SizeBytes)[:len(b)*Int16SizeBytes]
+}
+
+// Copy copies src to dst.
+func (int16Traits) Copy(dst, src []int16) { copy(dst, src) }
+
+// Uint16 traits
+
+const (
+ // Uint16SizeBytes specifies the number of bytes required to store a single uint16 in memory
+ Uint16SizeBytes = int(unsafe.Sizeof(uint16(0)))
+)
+
+type uint16Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (uint16Traits) BytesRequired(n int) int { return Uint16SizeBytes * n }
+
+// PutValue
+func (uint16Traits) PutValue(b []byte, v uint16) {
+ endian.Native.PutUint16(b, uint16(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type uint16.
+//
+// NOTE: len(b) must be a multiple of Uint16SizeBytes.
+func (uint16Traits) CastFromBytes(b []byte) []uint16 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*uint16)(unsafe.Pointer(h.Data)), cap(b)/Uint16SizeBytes)[:len(b)/Uint16SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (uint16Traits) CastToBytes(b []uint16) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint16SizeBytes)[:len(b)*Uint16SizeBytes]
+}
+
+// Copy copies src to dst.
+func (uint16Traits) Copy(dst, src []uint16) { copy(dst, src) }
+
+// Int8 traits
+
+const (
+ // Int8SizeBytes specifies the number of bytes required to store a single int8 in memory
+ Int8SizeBytes = int(unsafe.Sizeof(int8(0)))
+)
+
+type int8Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (int8Traits) BytesRequired(n int) int { return Int8SizeBytes * n }
+
+// PutValue
+func (int8Traits) PutValue(b []byte, v int8) {
+ b[0] = byte(v)
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type int8.
+//
+// NOTE: len(b) must be a multiple of Int8SizeBytes.
+func (int8Traits) CastFromBytes(b []byte) []int8 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*int8)(unsafe.Pointer(h.Data)), cap(b)/Int8SizeBytes)[:len(b)/Int8SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (int8Traits) CastToBytes(b []int8) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int8SizeBytes)[:len(b)*Int8SizeBytes]
+}
+
+// Copy copies src to dst.
+func (int8Traits) Copy(dst, src []int8) { copy(dst, src) }
+
+// Uint8 traits
+
+const (
+ // Uint8SizeBytes specifies the number of bytes required to store a single uint8 in memory
+ Uint8SizeBytes = int(unsafe.Sizeof(uint8(0)))
+)
+
+type uint8Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (uint8Traits) BytesRequired(n int) int { return Uint8SizeBytes * n }
+
+// PutValue
+func (uint8Traits) PutValue(b []byte, v uint8) {
+ b[0] = byte(v)
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type uint8.
+//
+// NOTE: len(b) must be a multiple of Uint8SizeBytes.
+func (uint8Traits) CastFromBytes(b []byte) []uint8 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*uint8)(unsafe.Pointer(h.Data)), cap(b)/Uint8SizeBytes)[:len(b)/Uint8SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (uint8Traits) CastToBytes(b []uint8) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint8SizeBytes)[:len(b)*Uint8SizeBytes]
+}
+
+// Copy copies src to dst.
+func (uint8Traits) Copy(dst, src []uint8) { copy(dst, src) }
+
+// Time32 traits
+
+const (
+ // Time32SizeBytes specifies the number of bytes required to store a single Time32 in memory
+ Time32SizeBytes = int(unsafe.Sizeof(Time32(0)))
+)
+
+type time32Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (time32Traits) BytesRequired(n int) int { return Time32SizeBytes * n }
+
+// PutValue
+func (time32Traits) PutValue(b []byte, v Time32) {
+ endian.Native.PutUint32(b, uint32(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type Time32.
+//
+// NOTE: len(b) must be a multiple of Time32SizeBytes.
+func (time32Traits) CastFromBytes(b []byte) []Time32 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*Time32)(unsafe.Pointer(h.Data)), cap(b)/Time32SizeBytes)[:len(b)/Time32SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (time32Traits) CastToBytes(b []Time32) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Time32SizeBytes)[:len(b)*Time32SizeBytes]
+}
+
+// Copy copies src to dst.
+func (time32Traits) Copy(dst, src []Time32) { copy(dst, src) }
+
+// Time64 traits
+
+const (
+ // Time64SizeBytes specifies the number of bytes required to store a single Time64 in memory
+ Time64SizeBytes = int(unsafe.Sizeof(Time64(0)))
+)
+
+type time64Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (time64Traits) BytesRequired(n int) int { return Time64SizeBytes * n }
+
+// PutValue
+func (time64Traits) PutValue(b []byte, v Time64) {
+ endian.Native.PutUint64(b, uint64(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type Time64.
+//
+// NOTE: len(b) must be a multiple of Time64SizeBytes.
+func (time64Traits) CastFromBytes(b []byte) []Time64 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*Time64)(unsafe.Pointer(h.Data)), cap(b)/Time64SizeBytes)[:len(b)/Time64SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (time64Traits) CastToBytes(b []Time64) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Time64SizeBytes)[:len(b)*Time64SizeBytes]
+}
+
+// Copy copies src to dst.
+func (time64Traits) Copy(dst, src []Time64) { copy(dst, src) }
+
+// Date32 traits
+
+const (
+ // Date32SizeBytes specifies the number of bytes required to store a single Date32 in memory
+ Date32SizeBytes = int(unsafe.Sizeof(Date32(0)))
+)
+
+type date32Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (date32Traits) BytesRequired(n int) int { return Date32SizeBytes * n }
+
+// PutValue
+func (date32Traits) PutValue(b []byte, v Date32) {
+ endian.Native.PutUint32(b, uint32(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type Date32.
+//
+// NOTE: len(b) must be a multiple of Date32SizeBytes.
+func (date32Traits) CastFromBytes(b []byte) []Date32 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*Date32)(unsafe.Pointer(h.Data)), cap(b)/Date32SizeBytes)[:len(b)/Date32SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (date32Traits) CastToBytes(b []Date32) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Date32SizeBytes)[:len(b)*Date32SizeBytes]
+}
+
+// Copy copies src to dst.
+func (date32Traits) Copy(dst, src []Date32) { copy(dst, src) }
+
+// Date64 traits
+
+const (
+ // Date64SizeBytes specifies the number of bytes required to store a single Date64 in memory
+ Date64SizeBytes = int(unsafe.Sizeof(Date64(0)))
+)
+
+type date64Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (date64Traits) BytesRequired(n int) int { return Date64SizeBytes * n }
+
+// PutValue
+func (date64Traits) PutValue(b []byte, v Date64) {
+ endian.Native.PutUint64(b, uint64(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type Date64.
+//
+// NOTE: len(b) must be a multiple of Date64SizeBytes.
+func (date64Traits) CastFromBytes(b []byte) []Date64 {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*Date64)(unsafe.Pointer(h.Data)), cap(b)/Date64SizeBytes)[:len(b)/Date64SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (date64Traits) CastToBytes(b []Date64) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Date64SizeBytes)[:len(b)*Date64SizeBytes]
+}
+
+// Copy copies src to dst.
+func (date64Traits) Copy(dst, src []Date64) { copy(dst, src) }
+
+// Duration traits
+
+const (
+ // DurationSizeBytes specifies the number of bytes required to store a single Duration in memory
+ DurationSizeBytes = int(unsafe.Sizeof(Duration(0)))
+)
+
+type durationTraits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (durationTraits) BytesRequired(n int) int { return DurationSizeBytes * n }
+
+// PutValue
+func (durationTraits) PutValue(b []byte, v Duration) {
+ endian.Native.PutUint64(b, uint64(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type Duration.
+//
+// NOTE: len(b) must be a multiple of DurationSizeBytes.
+func (durationTraits) CastFromBytes(b []byte) []Duration {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*Duration)(unsafe.Pointer(h.Data)), cap(b)/DurationSizeBytes)[:len(b)/DurationSizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (durationTraits) CastToBytes(b []Duration) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*DurationSizeBytes)[:len(b)*DurationSizeBytes]
+}
+
+// Copy copies src to dst.
+func (durationTraits) Copy(dst, src []Duration) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl
new file mode 100644
index 000000000..ffae975c1
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl
@@ -0,0 +1,83 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "math"
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+)
+
+var (
+{{range .In}}
+ {{.Name}}Traits {{.name}}Traits
+{{- end}}
+)
+
+{{range .In}}
+// {{.Name}} traits
+
+const (
+ // {{.Name}}SizeBytes specifies the number of bytes required to store a single {{.Type}} in memory
+ {{.Name}}SizeBytes = int(unsafe.Sizeof({{.Type}}({{.Default}})))
+)
+
+type {{.name}}Traits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func ({{.name}}Traits) BytesRequired(n int) int { return {{.Name}}SizeBytes * n }
+
+// PutValue
+func ({{.name}}Traits) PutValue(b []byte, v {{.Type}}) {
+{{- if eq .Type "float32" -}}
+ endian.Native.PutUint32(b, math.Float32bits(v))
+{{- else if eq .Type "float64" -}}
+ endian.Native.PutUint64(b, math.Float64bits(v))
+{{- else if eq .Size "1" -}}
+ b[0] = byte(v)
+{{- else if eq .Size "2" -}}
+ endian.Native.PutUint16(b, uint16(v))
+{{- else if eq .Size "4" -}}
+ endian.Native.PutUint32(b, uint32(v))
+{{- else if eq .Size "8" -}}
+ endian.Native.PutUint64(b, uint64(v))
+{{- else -}}
+ panic("invalid type {{.Type}}")
+{{end}}
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type {{.Type}}.
+//
+// NOTE: len(b) must be a multiple of {{.Name}}SizeBytes.
+func ({{.name}}Traits) CastFromBytes(b []byte) []{{.Type}} {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*{{.Type}})(unsafe.Pointer(h.Data)), cap(b)/{{.Name}}SizeBytes)[:len(b)/{{.Name}}SizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func ({{.name}}Traits) CastToBytes(b []{{.Type}}) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*{{.Name}}SizeBytes)[:len(b)*{{.Name}}SizeBytes]
+}
+
+// Copy copies src to dst.
+func ({{.name}}Traits) Copy(dst, src []{{.Type}}) { copy(dst, src) }
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl
new file mode 100644
index 000000000..96685f313
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow_test
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/apache/arrow/go/v14/arrow"
+)
+
+{{- range .In}}
+
+func Test{{.Name}}Traits(t *testing.T) {
+ const N = 10
+ b1 := arrow.{{.Name}}Traits.CastToBytes([]{{or .QualifiedType .Type}}{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ })
+
+ b2 := make([]byte, arrow.{{.Name}}Traits.BytesRequired(N))
+ for i := 0; i < N; i++ {
+ beg := i * arrow.{{.Name}}SizeBytes
+ end := (i + 1) * arrow.{{.Name}}SizeBytes
+ arrow.{{.Name}}Traits.PutValue(b2[beg:end], {{or .QualifiedType .Type}}(i))
+ }
+
+ if !reflect.DeepEqual(b1, b2) {
+ v1 := arrow.{{.Name}}Traits.CastFromBytes(b1)
+ v2 := arrow.{{.Name}}Traits.CastFromBytes(b2)
+ t.Fatalf("invalid values:\nb1=%v\nb2=%v\nv1=%v\nv2=%v\n", b1, b2, v1, v2)
+ }
+
+ v1 := arrow.{{.Name}}Traits.CastFromBytes(b1)
+ for i, v := range v1 {
+ if got, want := v, {{or .QualifiedType .Type}}(i); got != want {
+ t.Fatalf("invalid value[%d]. got=%v, want=%v", i, got, want)
+ }
+ }
+
+ v2 := make([]{{or .QualifiedType .Type}}, N)
+ arrow.{{.Name}}Traits.Copy(v2, v1)
+
+ if !reflect.DeepEqual(v1, v2) {
+ t.Fatalf("invalid values:\nv1=%v\nv2=%v\n", v1, v2)
+ }
+}
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go
new file mode 100644
index 000000000..7c393b355
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go
@@ -0,0 +1,59 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package arrow
+
+import (
+ "reflect"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/endian"
+)
+
+var TimestampTraits timestampTraits
+
+const (
+ // TimestampSizeBytes specifies the number of bytes required to store a single Timestamp in memory
+ TimestampSizeBytes = int(unsafe.Sizeof(Timestamp(0)))
+)
+
+type timestampTraits struct{}
+
+// BytesRequired returns the number of bytes required to store n elements in memory.
+func (timestampTraits) BytesRequired(n int) int { return TimestampSizeBytes * n }
+
+func (timestampTraits) PutValue(b []byte, v Timestamp) {
+ endian.Native.PutUint64(b, uint64(v))
+}
+
+// CastFromBytes reinterprets the slice b to a slice of type Timestamp.
+//
+// NOTE: len(b) must be a multiple of TimestampSizeBytes.
+func (timestampTraits) CastFromBytes(b []byte) []Timestamp {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*Timestamp)(unsafe.Pointer(h.Data)), cap(b)/TimestampSizeBytes)[:len(b)/TimestampSizeBytes]
+}
+
+// CastToBytes reinterprets the slice b to a slice of bytes.
+func (timestampTraits) CastToBytes(b []Timestamp) []byte {
+ h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+
+ return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*TimestampSizeBytes)[:len(b)*TimestampSizeBytes]
+}
+
+// Copy copies src to dst.
+func (timestampTraits) Copy(dst, src []Timestamp) { copy(dst, src) }
diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go
new file mode 100644
index 000000000..394d4f664
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=UnionMode -linecomment"; DO NOT EDIT.
+
+package arrow
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[SparseMode-2]
+ _ = x[DenseMode-3]
+}
+
+const _UnionMode_name = "SPARSEDENSE"
+
+var _UnionMode_index = [...]uint8{0, 6, 11}
+
+func (i UnionMode) String() string {
+ i -= 2
+ if i < 0 || i >= UnionMode(len(_UnionMode_index)-1) {
+ return "UnionMode(" + strconv.FormatInt(int64(i+2), 10) + ")"
+ }
+ return _UnionMode_name[_UnionMode_index[i]:_UnionMode_index[i+1]]
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go
new file mode 100644
index 000000000..86818bfd4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go
@@ -0,0 +1,452 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutils
+
+import (
+ "math"
+ "math/bits"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+func loadWord(byt []byte) uint64 {
+ return utils.ToLEUint64(*(*uint64)(unsafe.Pointer(&byt[0])))
+}
+
+func shiftWord(current, next uint64, shift int64) uint64 {
+ if shift == 0 {
+ return current
+ }
+ return (current >> shift) | (next << (64 - shift))
+}
+
+// BitBlockCount is returned by the various bit block counter utilities
+// in order to return a length of bits and the population count of that
+// slice of bits.
+type BitBlockCount struct {
+ Len int16
+ Popcnt int16
+}
+
+// NoneSet returns true if ALL the bits were 0 in this set, ie: Popcnt == 0
+func (b BitBlockCount) NoneSet() bool {
+ return b.Popcnt == 0
+}
+
+// AllSet returns true if ALL the bits were 1 in this set, ie: Popcnt == Len
+func (b BitBlockCount) AllSet() bool {
+ return b.Len == b.Popcnt
+}
+
+// BitBlockCounter is a utility for grabbing chunks of a bitmap at a time and efficiently
+// counting the number of bits which are 1.
+type BitBlockCounter struct {
+ bitmap []byte
+ bitsRemaining int64
+ bitOffset int8
+}
+
+const (
+ wordBits int64 = 64
+ fourWordsBits int64 = wordBits * 4
+)
+
+// NewBitBlockCounter returns a BitBlockCounter for the passed bitmap starting at startOffset
+// of length nbits.
+func NewBitBlockCounter(bitmap []byte, startOffset, nbits int64) *BitBlockCounter {
+ return &BitBlockCounter{
+ bitmap: bitmap[startOffset/8:],
+ bitsRemaining: nbits,
+ bitOffset: int8(startOffset % 8),
+ }
+}
+
+// getBlockSlow is for returning a block of the requested size when there aren't
+// enough bits remaining to do a full word computation.
+func (b *BitBlockCounter) getBlockSlow(blockSize int64) BitBlockCount {
+ runlen := int16(utils.Min(b.bitsRemaining, blockSize))
+ popcnt := int16(bitutil.CountSetBits(b.bitmap, int(b.bitOffset), int(runlen)))
+ b.bitsRemaining -= int64(runlen)
+ b.bitmap = b.bitmap[runlen/8:]
+ return BitBlockCount{runlen, popcnt}
+}
+
+// NextFourWords returns the next run of available bits, usually 256. The
+// returned pair contains the size of run and the number of true values.
+// The last block will have a length less than 256 if the bitmap length
+// is not a multiple of 256, and will return 0-length blocks in subsequent
+// invocations.
+func (b *BitBlockCounter) NextFourWords() BitBlockCount {
+ if b.bitsRemaining == 0 {
+ return BitBlockCount{0, 0}
+ }
+
+ totalPopcnt := 0
+ if b.bitOffset == 0 {
+ // if we're aligned at 0 bitoffset, then we can easily just jump from
+ // word to word nice and easy.
+ if b.bitsRemaining < fourWordsBits {
+ return b.getBlockSlow(fourWordsBits)
+ }
+ totalPopcnt += bits.OnesCount64(loadWord(b.bitmap))
+ totalPopcnt += bits.OnesCount64(loadWord(b.bitmap[8:]))
+ totalPopcnt += bits.OnesCount64(loadWord(b.bitmap[16:]))
+ totalPopcnt += bits.OnesCount64(loadWord(b.bitmap[24:]))
+ } else {
+ // When the offset is > 0, we need there to be a word beyond the last
+ // aligned word in the bitmap for the bit shifting logic.
+ if b.bitsRemaining < 5*fourWordsBits-int64(b.bitOffset) {
+ return b.getBlockSlow(fourWordsBits)
+ }
+
+ current := loadWord(b.bitmap)
+ next := loadWord(b.bitmap[8:])
+ totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset)))
+
+ current = next
+ next = loadWord(b.bitmap[16:])
+ totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset)))
+
+ current = next
+ next = loadWord(b.bitmap[24:])
+ totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset)))
+
+ current = next
+ next = loadWord(b.bitmap[32:])
+ totalPopcnt += bits.OnesCount64(shiftWord(current, next, int64(b.bitOffset)))
+ }
+ b.bitmap = b.bitmap[bitutil.BytesForBits(fourWordsBits):]
+ b.bitsRemaining -= fourWordsBits
+ return BitBlockCount{256, int16(totalPopcnt)}
+}
+
+// NextWord returns the next run of available bits, usually 64. The returned
+// pair contains the size of run and the number of true values. The last
+// block will have a length less than 64 if the bitmap length is not a
+// multiple of 64, and will return 0-length blocks in subsequent
+// invocations.
+func (b *BitBlockCounter) NextWord() BitBlockCount {
+ if b.bitsRemaining == 0 {
+ return BitBlockCount{0, 0}
+ }
+ popcnt := 0
+ if b.bitOffset == 0 {
+ if b.bitsRemaining < wordBits {
+ return b.getBlockSlow(wordBits)
+ }
+ popcnt = bits.OnesCount64(loadWord(b.bitmap))
+ } else {
+ // When the offset is > 0, we need there to be a word beyond the last
+ // aligned word in the bitmap for the bit shifting logic.
+ if b.bitsRemaining < (2*wordBits - int64(b.bitOffset)) {
+ return b.getBlockSlow(wordBits)
+ }
+ popcnt = bits.OnesCount64(shiftWord(loadWord(b.bitmap), loadWord(b.bitmap[8:]), int64(b.bitOffset)))
+ }
+ b.bitmap = b.bitmap[wordBits/8:]
+ b.bitsRemaining -= wordBits
+ return BitBlockCount{64, int16(popcnt)}
+}
+
+// OptionalBitBlockCounter is a useful counter to iterate through a possibly
+// non-existent validity bitmap to allow us to write one code path for both
+// the with-nulls and no-nulls cases without giving up a lot of performance.
+type OptionalBitBlockCounter struct {
+ hasBitmap bool
+ pos int64
+ len int64
+ counter *BitBlockCounter
+}
+
+// NewOptionalBitBlockCounter constructs and returns a new bit block counter that
+// can properly handle the case when a bitmap is null, if it is guaranteed that the
+// the bitmap is not nil, then prefer NewBitBlockCounter here.
+func NewOptionalBitBlockCounter(bitmap []byte, offset, length int64) *OptionalBitBlockCounter {
+ var counter *BitBlockCounter
+ if bitmap != nil {
+ counter = NewBitBlockCounter(bitmap, offset, length)
+ }
+ return &OptionalBitBlockCounter{
+ hasBitmap: bitmap != nil,
+ pos: 0,
+ len: length,
+ counter: counter,
+ }
+}
+
+// NextBlock returns block count for next word when the bitmap is available otherwise
+// return a block with length up to INT16_MAX when there is no validity
+// bitmap (so all the referenced values are not null).
+func (obc *OptionalBitBlockCounter) NextBlock() BitBlockCount {
+ const maxBlockSize = math.MaxInt16
+ if obc.hasBitmap {
+ block := obc.counter.NextWord()
+ obc.pos += int64(block.Len)
+ return block
+ }
+
+ blockSize := int16(utils.Min(maxBlockSize, obc.len-obc.pos))
+ obc.pos += int64(blockSize)
+ // all values are non-null
+ return BitBlockCount{blockSize, blockSize}
+}
+
+// NextWord is like NextBlock, but returns a word-sized block even when there is no
+// validity bitmap
+func (obc *OptionalBitBlockCounter) NextWord() BitBlockCount {
+ const wordsize = 64
+ if obc.hasBitmap {
+ block := obc.counter.NextWord()
+ obc.pos += int64(block.Len)
+ return block
+ }
+ blockSize := int16(utils.Min(wordsize, obc.len-obc.pos))
+ obc.pos += int64(blockSize)
+ // all values are non-null
+ return BitBlockCount{blockSize, blockSize}
+}
+
+// VisitBitBlocks is a utility for easily iterating through the blocks of bits in a bitmap,
+// calling the appropriate visitValid/visitInvalid function as we iterate through the bits.
+// visitValid is called with the bitoffset of the valid bit. Don't use this inside a tight
+// loop when performance is needed and instead prefer manually constructing these loops
+// in that scenario.
+func VisitBitBlocks(bitmap []byte, offset, length int64, visitValid func(pos int64), visitInvalid func()) {
+ counter := NewOptionalBitBlockCounter(bitmap, offset, length)
+ pos := int64(0)
+ for pos < length {
+ block := counter.NextBlock()
+ if block.AllSet() {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ visitValid(pos)
+ }
+ } else if block.NoneSet() {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ visitInvalid()
+ }
+ } else {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ if bitutil.BitIsSet(bitmap, int(offset+pos)) {
+ visitValid(pos)
+ } else {
+ visitInvalid()
+ }
+ }
+ }
+ }
+}
+
+// VisitBitBlocksShort is like VisitBitBlocks, but the visitValid/visitInvalid
+// callbacks return an error; iteration stops and that error is returned as
+// soon as a callback fails. Don't use this inside a tight loop when
+// performance is needed and instead prefer manually constructing these loops
+// in that scenario.
+func VisitBitBlocksShort(bitmap []byte, offset, length int64, visitValid func(pos int64) error, visitInvalid func() error) error {
+ counter := NewOptionalBitBlockCounter(bitmap, offset, length)
+ pos := int64(0)
+ for pos < length {
+ block := counter.NextBlock()
+ if block.AllSet() {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ if err := visitValid(pos); err != nil {
+ return err
+ }
+ }
+ } else if block.NoneSet() {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ if err := visitInvalid(); err != nil {
+ return err
+ }
+ }
+ } else {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ if bitutil.BitIsSet(bitmap, int(offset+pos)) {
+ if err := visitValid(pos); err != nil {
+ return err
+ }
+ } else {
+ if err := visitInvalid(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func VisitTwoBitBlocks(leftBitmap, rightBitmap []byte, leftOffset, rightOffset int64, len int64, visitValid func(pos int64), visitNull func()) {
+ if leftBitmap == nil || rightBitmap == nil {
+ // at most one is present
+ if leftBitmap == nil {
+ VisitBitBlocks(rightBitmap, rightOffset, len, visitValid, visitNull)
+ } else {
+ VisitBitBlocks(leftBitmap, leftOffset, len, visitValid, visitNull)
+ }
+ return
+ }
+
+ bitCounter := NewBinaryBitBlockCounter(leftBitmap, rightBitmap, leftOffset, rightOffset, len)
+ var pos int64
+ for pos < len {
+ block := bitCounter.NextAndWord()
+ if block.AllSet() {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ visitValid(pos)
+ }
+ } else if block.NoneSet() {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ visitNull()
+ }
+ } else {
+ for i := 0; i < int(block.Len); i, pos = i+1, pos+1 {
+ if bitutil.BitIsSet(leftBitmap, int(leftOffset+pos)) && bitutil.BitIsSet(rightBitmap, int(rightOffset+pos)) {
+ visitValid(pos)
+ } else {
+ visitNull()
+ }
+ }
+ }
+ }
+}
+
+type bitOp struct {
+ bit func(bool, bool) bool
+ word func(uint64, uint64) uint64
+}
+
+var (
+ bitBlockAnd = bitOp{
+ bit: func(a, b bool) bool { return a && b },
+ word: func(a, b uint64) uint64 { return a & b },
+ }
+ bitBlockAndNot = bitOp{
+ bit: func(a, b bool) bool { return a && !b },
+ word: func(a, b uint64) uint64 { return a &^ b },
+ }
+ bitBlockOr = bitOp{
+ bit: func(a, b bool) bool { return a || b },
+ word: func(a, b uint64) uint64 { return a | b },
+ }
+ bitBlockOrNot = bitOp{
+ bit: func(a, b bool) bool { return a || !b },
+ word: func(a, b uint64) uint64 { return a | ^b },
+ }
+)
+
+// BinaryBitBlockCounter computes popcounts on the result of bitwise
+// operations between two bitmaps, 64 bits at a time. A 64-bit word
+// is loaded from each bitmap, then the popcount is computed on
+// e.g. the bitwise-and of the two words
+type BinaryBitBlockCounter struct {
+ left []byte
+ right []byte
+ bitsRemaining int64
+ leftOffset, rightOffset int64
+
+ bitsRequiredForWords int64
+}
+
+// NewBinaryBitBlockCounter constructs a binary bit block counter for
+// computing the popcounts on the results of operations between
+// the passed in bitmaps, with their respective offsets.
+func NewBinaryBitBlockCounter(left, right []byte, leftOffset, rightOffset int64, length int64) *BinaryBitBlockCounter {
+ ret := &BinaryBitBlockCounter{
+ left: left[leftOffset/8:],
+ right: right[rightOffset/8:],
+ leftOffset: leftOffset % 8,
+ rightOffset: rightOffset % 8,
+ bitsRemaining: length,
+ }
+
+ leftBitsReq := int64(64)
+ if ret.leftOffset != 0 {
+ leftBitsReq = 64 + (64 - ret.leftOffset)
+ }
+ rightBitsReq := int64(64)
+ if ret.rightOffset != 0 {
+ rightBitsReq = 64 + (64 - ret.rightOffset)
+ }
+
+ if leftBitsReq > rightBitsReq {
+ ret.bitsRequiredForWords = leftBitsReq
+ } else {
+ ret.bitsRequiredForWords = rightBitsReq
+ }
+
+ return ret
+}
+
+// NextAndWord returns the popcount of the bitwise-and of the next run
+// of available bits, up to 64. The returned pair contains the size of
+// the run and the number of true values. the last block will have a
+// length less than 64 if the bitmap length is not a multiple of 64,
+// and will return 0-length blocks in subsequent invocations
+func (b *BinaryBitBlockCounter) NextAndWord() BitBlockCount { return b.nextWord(bitBlockAnd) }
+
+// NextAndNotWord is like NextAndWord but performs x &^ y on each run
+func (b *BinaryBitBlockCounter) NextAndNotWord() BitBlockCount { return b.nextWord(bitBlockAndNot) }
+
+// NextOrWord is like NextAndWord but performs x | y on each run
+func (b *BinaryBitBlockCounter) NextOrWord() BitBlockCount { return b.nextWord(bitBlockOr) }
+
+// NextOrNotWord is like NextAndWord but performs x | ^y on each run
+func (b *BinaryBitBlockCounter) NextOrNotWord() BitBlockCount { return b.nextWord(bitBlockOrNot) }
+
+func (b *BinaryBitBlockCounter) nextWord(op bitOp) BitBlockCount {
+ if b.bitsRemaining == 0 {
+ return BitBlockCount{}
+ }
+
+ // when offset is >0, we need there to be a word beyond the last
+ // aligned word in the bitmap for the bit shifting logic
+ if b.bitsRemaining < b.bitsRequiredForWords {
+ runLength := int16(b.bitsRemaining)
+ if runLength > int16(wordBits) {
+ runLength = int16(wordBits)
+ }
+
+ var popcount int16
+ for i := int16(0); i < runLength; i++ {
+ if op.bit(bitutil.BitIsSet(b.left, int(b.leftOffset)+int(i)),
+ bitutil.BitIsSet(b.right, int(b.rightOffset)+int(i))) {
+ popcount++
+ }
+ }
+ // this code path should trigger _at most_ 2 times. in the "two times"
+ // case, the first time the run length will be a multiple of 8.
+ b.left = b.left[runLength/8:]
+ b.right = b.right[runLength/8:]
+ b.bitsRemaining -= int64(runLength)
+ return BitBlockCount{Len: runLength, Popcnt: popcount}
+ }
+
+ var popcount int
+ if b.leftOffset == 0 && b.rightOffset == 0 {
+ popcount = bits.OnesCount64(op.word(loadWord(b.left), loadWord(b.right)))
+ } else {
+ leftWord := shiftWord(loadWord(b.left), loadWord(b.left[8:]), b.leftOffset)
+ rightWord := shiftWord(loadWord(b.right), loadWord(b.right[8:]), b.rightOffset)
+ popcount = bits.OnesCount64(op.word(leftWord, rightWord))
+ }
+ b.left = b.left[wordBits/8:]
+ b.right = b.right[wordBits/8:]
+ b.bitsRemaining -= wordBits
+ return BitBlockCount{Len: int16(wordBits), Popcnt: int16(popcount)}
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go
new file mode 100644
index 000000000..a1686a490
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go
@@ -0,0 +1,151 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutils
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math/bits"
+ "unsafe"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+// BitRun represents a run of bits with the same value of length Len
+// with Set representing if the group of bits were 1 or 0.
+type BitRun struct {
+ Len int64
+ Set bool
+}
+
+// BitRunReader is an interface that is usable by multiple callers to provide
+// multiple types of bit run readers such as a reverse reader and so on.
+//
+// It's a convenience interface for counting contiguous set/unset bits in a bitmap.
+// In places where BitBlockCounter can be used, then it would be preferred to use that
+// as it would be faster than using BitRunReader.
+type BitRunReader interface {
+ NextRun() BitRun
+}
+
+func (b BitRun) String() string {
+ return fmt.Sprintf("{Length: %d, set=%t}", b.Len, b.Set)
+}
+
+type bitRunReader struct {
+ bitmap []byte
+ pos int64
+ length int64
+ word uint64
+ curRunBitSet bool
+}
+
+// NewBitRunReader returns a reader for the given bitmap, offset and length that
+// grabs runs of the same value bit at a time for easy iteration.
+func NewBitRunReader(bitmap []byte, offset int64, length int64) BitRunReader {
+ ret := &bitRunReader{
+ bitmap: bitmap[offset/8:],
+ pos: offset % 8,
+ length: (offset % 8) + length,
+ }
+
+ if length == 0 {
+ return ret
+ }
+
+ ret.curRunBitSet = bitutil.BitIsNotSet(bitmap, int(offset))
+ bitsRemaining := length + ret.pos
+ ret.loadWord(bitsRemaining)
+ ret.word = ret.word &^ LeastSignificantBitMask(ret.pos)
+ return ret
+}
+
+// NextRun returns a new BitRun containing the number of contiguous bits with the
+// same value. Len == 0 indicates the end of the bitmap.
+func (b *bitRunReader) NextRun() BitRun {
+ if b.pos >= b.length {
+ return BitRun{0, false}
+ }
+
+// This implementation relies on an efficient implementation of
+// CountTrailingZeros and assumes that runs are more often than not. The logic
+ // not. The logic is to incrementally find the next bit change
+ // from the current position. This is done by zeroing all
+ // bits in word_ up to position_ and using the TrailingZeroCount
+ // to find the index of the next set bit.
+
+ // The runs alternate on each call, so flip the bit.
+ b.curRunBitSet = !b.curRunBitSet
+
+ start := b.pos
+ startOffset := start & 63
+
+ // Invert the word for proper use of CountTrailingZeros and
+// clear bits so CountTrailingZeros can do its magic.
+ b.word = ^b.word &^ LeastSignificantBitMask(startOffset)
+
+ // Go forward until the next change from unset to set.
+ newbits := int64(bits.TrailingZeros64(b.word)) - startOffset
+ b.pos += newbits
+
+ if IsMultipleOf64(b.pos) && b.pos < b.length {
+ b.advanceUntilChange()
+ }
+ return BitRun{b.pos - start, b.curRunBitSet}
+}
+
+func (b *bitRunReader) advanceUntilChange() {
+ newbits := int64(0)
+ for {
+ b.bitmap = b.bitmap[arrow.Uint64SizeBytes:]
+ b.loadNextWord()
+ newbits = int64(bits.TrailingZeros64(b.word))
+ b.pos += newbits
+ if !IsMultipleOf64(b.pos) || b.pos >= b.length || newbits <= 0 {
+ break
+ }
+ }
+}
+
+func (b *bitRunReader) loadNextWord() {
+ b.loadWord(b.length - b.pos)
+}
+
+func (b *bitRunReader) loadWord(bitsRemaining int64) {
+ b.word = 0
+ if bitsRemaining >= 64 {
+ b.word = binary.LittleEndian.Uint64(b.bitmap)
+ } else {
+ nbytes := bitutil.BytesForBits(bitsRemaining)
+ wordptr := (*(*[8]byte)(unsafe.Pointer(&b.word)))[:]
+ copy(wordptr, b.bitmap[:nbytes])
+
+ bitutil.SetBitTo(wordptr, int(bitsRemaining), bitutil.BitIsNotSet(wordptr, int(bitsRemaining-1)))
+ // reset the value to little endian for big endian architectures
+ b.word = utils.ToLEUint64(b.word)
+ }
+
+ // Two cases:
+ // 1. For unset, CountTrailingZeros works naturally so we don't
+ // invert the word.
+ // 2. Otherwise invert so we can use CountTrailingZeros.
+ if b.curRunBitSet {
+ b.word = ^b.word
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go
new file mode 100644
index 000000000..a2269ffec
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go
@@ -0,0 +1,361 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutils
+
+import (
+ "encoding/binary"
+ "math/bits"
+
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+// IsMultipleOf64 returns whether v is a multiple of 64.
+func IsMultipleOf64(v int64) bool { return v&63 == 0 }
+
+// LeastSignificantBitMask returns a bit mask to return the least significant
+// bits for a value starting from the bit index passed in. ie: if you want a
+// mask for the 4 least significant bits, you call LeastSignificantBitMask(4)
+func LeastSignificantBitMask(index int64) uint64 {
+ return (uint64(1) << index) - 1
+}
+
+// SetBitRun describes a run of contiguous set bits in a bitmap with Pos being
+// the starting position of the run and Length being the number of bits.
+type SetBitRun struct {
+ Pos int64
+ Length int64
+}
+
+// AtEnd returns true if this bit run is the end of the set by checking
+// that the length is 0.
+func (s SetBitRun) AtEnd() bool {
+ return s.Length == 0
+}
+
+// Equal returns whether rhs is the same run as s
+func (s SetBitRun) Equal(rhs SetBitRun) bool {
+ return s.Pos == rhs.Pos && s.Length == rhs.Length
+}
+
+// SetBitRunReader is an interface for reading groups of contiguous set bits
+// from a bitmap. The interface allows us to create different reader implementations
+// that share the same interface easily such as a reverse set reader.
+type SetBitRunReader interface {
+ // NextRun will return the next run of contiguous set bits in the bitmap
+ NextRun() SetBitRun
+ // Reset allows re-using the reader by providing a new bitmap, offset and length. The arguments
+ // match the New function for the reader being used.
+ Reset([]byte, int64, int64)
+ // VisitSetBitRuns calls visitFn for each set in a loop starting from the current position
+ // it's roughly equivalent to simply looping, calling NextRun and calling visitFn on the run
+ // for each run.
+ VisitSetBitRuns(visitFn VisitFn) error
+}
+
+type baseSetBitRunReader struct {
+ bitmap []byte
+ pos int64
+ length int64
+ remaining int64
+ curWord uint64
+ curNumBits int32
+ reversed bool
+
+ firstBit uint64
+}
+
+// NewSetBitRunReader returns a SetBitRunReader for the bitmap starting at startOffset which will read
+// numvalues bits.
+func NewSetBitRunReader(validBits []byte, startOffset, numValues int64) SetBitRunReader {
+ return newBaseSetBitRunReader(validBits, startOffset, numValues, false)
+}
+
+// NewReverseSetBitRunReader returns a SetBitRunReader like NewSetBitRunReader, except it will
+// return runs starting from the end of the bitmap until it reaches startOffset rather than starting
+// at startOffset and reading from there. The SetBitRuns will still operate the same, so Pos
+// will still be the position of the "left-most" bit of the run or the "start" of the run. It
+// just returns runs starting from the end instead of starting from the beginning.
+func NewReverseSetBitRunReader(validBits []byte, startOffset, numValues int64) SetBitRunReader {
+ return newBaseSetBitRunReader(validBits, startOffset, numValues, true)
+}
+
+func newBaseSetBitRunReader(bitmap []byte, startOffset, length int64, reverse bool) *baseSetBitRunReader {
+ ret := &baseSetBitRunReader{reversed: reverse}
+ ret.Reset(bitmap, startOffset, length)
+ return ret
+}
+
+func (br *baseSetBitRunReader) Reset(bitmap []byte, startOffset, length int64) {
+ br.bitmap = bitmap
+ br.length = length
+ br.remaining = length
+ br.curNumBits = 0
+ br.curWord = 0
+
+ if !br.reversed {
+ br.pos = startOffset / 8
+ br.firstBit = 1
+
+ bitOffset := int8(startOffset % 8)
+ if length > 0 && bitOffset != 0 {
+ br.curNumBits = int32(utils.MinInt(int(length), int(8-bitOffset)))
+ br.curWord = br.loadPartial(bitOffset, int64(br.curNumBits))
+ }
+ return
+ }
+
+ br.pos = (startOffset + length) / 8
+ br.firstBit = uint64(0x8000000000000000)
+ endBitOffset := int8((startOffset + length) % 8)
+ if length > 0 && endBitOffset != 0 {
+ br.pos++
+ br.curNumBits = int32(utils.MinInt(int(length), int(endBitOffset)))
+ br.curWord = br.loadPartial(8-endBitOffset, int64(br.curNumBits))
+ }
+}
+
+func (br *baseSetBitRunReader) consumeBits(word uint64, nbits int32) uint64 {
+ if br.reversed {
+ return word << nbits
+ }
+ return word >> nbits
+}
+
+func (br *baseSetBitRunReader) countFirstZeros(word uint64) int32 {
+ if br.reversed {
+ return int32(bits.LeadingZeros64(word))
+ }
+ return int32(bits.TrailingZeros64(word))
+}
+
+func (br *baseSetBitRunReader) loadPartial(bitOffset int8, numBits int64) uint64 {
+ var word [8]byte
+ nbytes := bitutil.BytesForBits(numBits)
+ if br.reversed {
+ br.pos -= nbytes
+ copy(word[8-nbytes:], br.bitmap[br.pos:br.pos+nbytes])
+ return (binary.LittleEndian.Uint64(word[:]) << bitOffset) &^ LeastSignificantBitMask(64-numBits)
+ }
+
+ copy(word[:], br.bitmap[br.pos:br.pos+nbytes])
+ br.pos += nbytes
+ return (binary.LittleEndian.Uint64(word[:]) >> bitOffset) & LeastSignificantBitMask(numBits)
+}
+
+func (br *baseSetBitRunReader) findCurrentRun() SetBitRun {
+ nzeros := br.countFirstZeros(br.curWord)
+ if nzeros >= br.curNumBits {
+ br.remaining -= int64(br.curNumBits)
+ br.curWord = 0
+ br.curNumBits = 0
+ return SetBitRun{0, 0}
+ }
+
+ br.curWord = br.consumeBits(br.curWord, nzeros)
+ br.curNumBits -= nzeros
+ br.remaining -= int64(nzeros)
+ pos := br.position()
+
+ numOnes := br.countFirstZeros(^br.curWord)
+ br.curWord = br.consumeBits(br.curWord, numOnes)
+ br.curNumBits -= numOnes
+ br.remaining -= int64(numOnes)
+ return SetBitRun{pos, int64(numOnes)}
+}
+
+func (br *baseSetBitRunReader) position() int64 {
+ if br.reversed {
+ return br.remaining
+ }
+ return br.length - br.remaining
+}
+
+func (br *baseSetBitRunReader) adjustRun(run SetBitRun) SetBitRun {
+ if br.reversed {
+ run.Pos -= run.Length
+ }
+ return run
+}
+
+func (br *baseSetBitRunReader) loadFull() (ret uint64) {
+ if br.reversed {
+ br.pos -= 8
+ }
+ ret = binary.LittleEndian.Uint64(br.bitmap[br.pos : br.pos+8])
+ if !br.reversed {
+ br.pos += 8
+ }
+ return
+}
+
+func (br *baseSetBitRunReader) skipNextZeros() {
+ for br.remaining >= 64 {
+ br.curWord = br.loadFull()
+ nzeros := br.countFirstZeros(br.curWord)
+ if nzeros < 64 {
+ br.curWord = br.consumeBits(br.curWord, nzeros)
+ br.curNumBits = 64 - nzeros
+ br.remaining -= int64(nzeros)
+ return
+ }
+ br.remaining -= 64
+ }
+ // run of zeros continues in last bitmap word
+ if br.remaining > 0 {
+ br.curWord = br.loadPartial(0, br.remaining)
+ br.curNumBits = int32(br.remaining)
+ nzeros := int32(utils.MinInt(int(br.curNumBits), int(br.countFirstZeros(br.curWord))))
+ br.curWord = br.consumeBits(br.curWord, nzeros)
+ br.curNumBits -= nzeros
+ br.remaining -= int64(nzeros)
+ }
+}
+
+func (br *baseSetBitRunReader) countNextOnes() int64 {
+ var length int64
+ if ^br.curWord != 0 {
+ numOnes := br.countFirstZeros(^br.curWord)
+ br.remaining -= int64(numOnes)
+ br.curWord = br.consumeBits(br.curWord, numOnes)
+ br.curNumBits -= numOnes
+ if br.curNumBits != 0 {
+ return int64(numOnes)
+ }
+ length = int64(numOnes)
+ } else {
+ br.remaining -= 64
+ br.curNumBits = 0
+ length = 64
+ }
+
+ for br.remaining >= 64 {
+ br.curWord = br.loadFull()
+ numOnes := br.countFirstZeros(^br.curWord)
+ length += int64(numOnes)
+ br.remaining -= int64(numOnes)
+ if numOnes < 64 {
+ br.curWord = br.consumeBits(br.curWord, numOnes)
+ br.curNumBits = 64 - numOnes
+ return length
+ }
+ }
+
+ if br.remaining > 0 {
+ br.curWord = br.loadPartial(0, br.remaining)
+ br.curNumBits = int32(br.remaining)
+ numOnes := br.countFirstZeros(^br.curWord)
+ br.curWord = br.consumeBits(br.curWord, numOnes)
+ br.curNumBits -= numOnes
+ br.remaining -= int64(numOnes)
+ length += int64(numOnes)
+ }
+ return length
+}
+
+func (br *baseSetBitRunReader) NextRun() SetBitRun {
+ var (
+ pos int64 = 0
+ length int64 = 0
+ )
+
+ if br.curNumBits != 0 {
+ run := br.findCurrentRun()
+ if run.Length != 0 && br.curNumBits != 0 {
+ return br.adjustRun(run)
+ }
+ pos = run.Pos
+ length = run.Length
+ }
+
+ if length == 0 {
+ // we didn't get any ones in curWord, so we can skip any zeros
+ // in the following words
+ br.skipNextZeros()
+ if br.remaining == 0 {
+ return SetBitRun{0, 0}
+ }
+ pos = br.position()
+ } else if br.curNumBits == 0 {
+ if br.remaining >= 64 {
+ br.curWord = br.loadFull()
+ br.curNumBits = 64
+ } else if br.remaining > 0 {
+ br.curWord = br.loadPartial(0, br.remaining)
+ br.curNumBits = int32(br.remaining)
+ } else {
+ return br.adjustRun(SetBitRun{pos, length})
+ }
+ if (br.curWord & br.firstBit) == 0 {
+ return br.adjustRun(SetBitRun{pos, length})
+ }
+ }
+
+ length += br.countNextOnes()
+ return br.adjustRun(SetBitRun{pos, length})
+}
+
+// VisitFn is a callback function for visiting runs of contiguous bits
+type VisitFn func(pos int64, length int64) error
+
+func (br *baseSetBitRunReader) VisitSetBitRuns(visitFn VisitFn) error {
+ for {
+ run := br.NextRun()
+ if run.Length == 0 {
+ break
+ }
+
+ if err := visitFn(run.Pos, run.Length); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// VisitSetBitRuns is just a convenience function for calling NewSetBitRunReader and then VisitSetBitRuns
+func VisitSetBitRuns(bitmap []byte, bitmapOffset int64, length int64, visitFn VisitFn) error {
+ if bitmap == nil {
+ return visitFn(0, length)
+ }
+ rdr := NewSetBitRunReader(bitmap, bitmapOffset, length)
+ for {
+ run := rdr.NextRun()
+ if run.Length == 0 {
+ break
+ }
+
+ if err := visitFn(run.Pos, run.Length); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func VisitSetBitRunsNoErr(bitmap []byte, bitmapOffset int64, length int64, visitFn func(pos, length int64)) {
+ if bitmap == nil {
+ visitFn(0, length)
+ return
+ }
+ rdr := NewSetBitRunReader(bitmap, bitmapOffset, length)
+ for {
+ run := rdr.NextRun()
+ if run.Length == 0 {
+ break
+ }
+ visitFn(run.Pos, run.Length)
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go
new file mode 100644
index 000000000..78219d812
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go
@@ -0,0 +1,109 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bitutils
+
+import "github.com/apache/arrow/go/v14/arrow/bitutil"
+
+// GenerateBits writes sequential bits to a bitmap. Bits preceding the
+// initial start offset are preserved, bits following the bitmap may
+// get clobbered.
+func GenerateBits(bitmap []byte, start, length int64, g func() bool) {
+ if length == 0 {
+ return
+ }
+
+ cur := bitmap[start/8:]
+ mask := bitutil.BitMask[start%8]
+ curbyte := cur[0] & bitutil.PrecedingBitmask[start%8]
+
+ for i := int64(0); i < length; i++ {
+ bit := g()
+ if bit {
+ curbyte = curbyte | mask
+ }
+ mask <<= 1
+ if mask == 0 {
+ mask = 1
+ cur[0] = curbyte
+ cur = cur[1:]
+ curbyte = 0
+ }
+ }
+
+ if mask != 1 {
+ cur[0] = curbyte
+ }
+}
+
+// GenerateBitsUnrolled is like GenerateBits but unrolls its main loop for
+// higher performance.
+//
+// See the benchmarks for evidence.
+func GenerateBitsUnrolled(bitmap []byte, start, length int64, g func() bool) {
+ if length == 0 {
+ return
+ }
+
+ var (
+ curbyte byte
+ cur = bitmap[start/8:]
+ startBitOffset uint64 = uint64(start % 8)
+ mask = bitutil.BitMask[startBitOffset]
+ remaining = length
+ )
+
+ if mask != 0x01 {
+ curbyte = cur[0] & bitutil.PrecedingBitmask[startBitOffset]
+ for mask != 0 && remaining > 0 {
+ if g() {
+ curbyte |= mask
+ }
+ mask <<= 1
+ remaining--
+ }
+ cur[0] = curbyte
+ cur = cur[1:]
+ }
+
+ var outResults [8]byte
+ for remainingBytes := remaining / 8; remainingBytes > 0; remainingBytes-- {
+ for i := 0; i < 8; i++ {
+ if g() {
+ outResults[i] = 1
+ } else {
+ outResults[i] = 0
+ }
+ }
+ cur[0] = (outResults[0] | outResults[1]<<1 | outResults[2]<<2 |
+ outResults[3]<<3 | outResults[4]<<4 | outResults[5]<<5 |
+ outResults[6]<<6 | outResults[7]<<7)
+ cur = cur[1:]
+ }
+
+ remainingBits := remaining % 8
+ if remainingBits > 0 {
+ curbyte = 0
+ mask = 0x01
+ for ; remainingBits > 0; remainingBits-- {
+ if g() {
+ curbyte |= mask
+ }
+ mask <<= 1
+ }
+ cur[0] = curbyte
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go
new file mode 100644
index 000000000..c1bdfeb6d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hashing
+
+import (
+ "math/bits"
+ "unsafe"
+
+ "github.com/zeebo/xxh3"
+)
+
+func hashInt(val uint64, alg uint64) uint64 {
+ // Two of xxhash's prime multipliers (which are chosen for their
+ // bit dispersion properties)
+ var multipliers = [2]uint64{11400714785074694791, 14029467366897019727}
+ // Multiplying by the prime number mixes the low bits into the high bits,
+ // then byte-swapping (which is a single CPU instruction) allows the
+ // combined high and low bits to participate in the initial hash table index.
+ return bits.ReverseBytes64(multipliers[alg] * val)
+}
+
+func hashFloat32(val float32, alg uint64) uint64 {
+ // grab the raw byte pattern of the
+ bt := *(*[4]byte)(unsafe.Pointer(&val))
+ x := uint64(*(*uint32)(unsafe.Pointer(&bt[0])))
+ hx := hashInt(x, alg)
+ hy := hashInt(x, alg^1)
+ return 4 ^ hx ^ hy
+}
+
+func hashFloat64(val float64, alg uint64) uint64 {
+ bt := *(*[8]byte)(unsafe.Pointer(&val))
+ hx := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[4]))), alg)
+ hy := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[0]))), alg^1)
+ return 8 ^ hx ^ hy
+}
+
+// prime constants used for slightly increasing the hash quality further
+var exprimes = [2]uint64{1609587929392839161, 9650029242287828579}
+
+// for smaller amounts of bytes this is faster than even calling into
+// xxh3 to do the Hash, so we specialize in order to get the benefits
+// of that performance.
+func Hash(b []byte, alg uint64) uint64 {
+ n := uint32(len(b))
+ if n <= 16 {
+ switch {
+ case n > 8:
+ // 8 < length <= 16
+ // apply same principle as above, but as two 64-bit ints
+ x := *(*uint64)(unsafe.Pointer(&b[n-8]))
+ y := *(*uint64)(unsafe.Pointer(&b[0]))
+ hx := hashInt(x, alg)
+ hy := hashInt(y, alg^1)
+ return uint64(n) ^ hx ^ hy
+ case n >= 4:
+ // 4 < length <= 8
+ // we can read the bytes as two overlapping 32-bit ints, apply different
+ // hash functions to each in parallel
+ // then xor the results
+ x := *(*uint32)(unsafe.Pointer(&b[n-4]))
+ y := *(*uint32)(unsafe.Pointer(&b[0]))
+ hx := hashInt(uint64(x), alg)
+ hy := hashInt(uint64(y), alg^1)
+ return uint64(n) ^ hx ^ hy
+ case n > 0:
+ x := uint32((n << 24) ^ (uint32(b[0]) << 16) ^ (uint32(b[n/2]) << 8) ^ uint32(b[n-1]))
+ return hashInt(uint64(x), alg)
+ case n == 0:
+ return 1
+ }
+ }
+
+ // increase differentiation enough to improve hash quality
+ return xxh3.Hash(b) + exprimes[alg]
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go
new file mode 100644
index 000000000..b772c7d7f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go
@@ -0,0 +1,26 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.20 || tinygo
+
+package hashing
+
+import "unsafe"
+
+func hashString(val string, alg uint64) uint64 {
+ buf := unsafe.Slice(unsafe.StringData(val), len(val))
+ return Hash(buf, alg)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go
new file mode 100644
index 000000000..f38eb5c52
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.20 && !tinygo
+
+package hashing
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func hashString(val string, alg uint64) uint64 {
+ if val == "" {
+ return Hash([]byte{}, alg)
+ }
+ // highly efficient way to get byte slice without copy before
+ // the introduction of unsafe.StringData in go1.20
+ // (https://stackoverflow.com/questions/59209493/how-to-use-unsafe-get-a-byte-slice-from-a-string-without-memory-copy)
+ const MaxInt32 = 1<<31 - 1
+ buf := (*[MaxInt32]byte)(unsafe.Pointer((*reflect.StringHeader)(
+ unsafe.Pointer(&val)).Data))[: len(val)&MaxInt32 : len(val)&MaxInt32]
+ return Hash(buf, alg)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata b/vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata
new file mode 100644
index 000000000..0ba6f765d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata
@@ -0,0 +1,42 @@
+[
+ {
+ "Name": "Int8",
+ "name": "int8"
+ },
+ {
+ "Name": "Uint8",
+ "name": "uint8"
+ },
+ {
+ "Name": "Int16",
+ "name": "int16"
+ },
+ {
+ "Name": "Uint16",
+ "name": "uint16"
+ },
+ {
+ "Name": "Int32",
+ "name": "int32"
+ },
+ {
+ "Name": "Int64",
+ "name": "int64"
+ },
+ {
+ "Name": "Uint32",
+ "name": "uint32"
+ },
+ {
+ "Name": "Uint64",
+ "name": "uint64"
+ },
+ {
+ "Name": "Float32",
+ "name": "float32"
+ },
+ {
+ "Name": "Float64",
+ "name": "float64"
+ }
+]
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go
new file mode 100644
index 000000000..cc996552b
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go
@@ -0,0 +1,2833 @@
+// Code generated by xxh3_memo_table.gen.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hashing
+
+import (
+ "math"
+
+ "github.com/apache/arrow/go/v14/arrow"
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+type payloadInt8 struct {
+ val int8
+ memoIdx int32
+}
+
+type entryInt8 struct {
+ h uint64
+ payload payloadInt8
+}
+
+func (e entryInt8) Valid() bool { return e.h != sentinel }
+
+// Int8HashTable is a hashtable specifically for int8 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Int8HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entryInt8
+}
+
+// NewInt8HashTable returns a new hash table for int8 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewInt8HashTable(cap uint64) *Int8HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &Int8HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entryInt8, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Int8HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entryInt8, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Int8HashTable) CopyValues(out []int8) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Int8HashTable) CopyValuesSubset(start int, out []int8) {
+ h.VisitEntries(func(e *entryInt8) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+func (h *Int8HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+func (h *Int8HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.Int8Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entryInt8) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ data[idx] = e.payload.val
+ }
+ })
+}
+
+func (h *Int8HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Int8HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming it's payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Int8HashTable) Lookup(v uint64, cmp func(int8) bool) (*entryInt8, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+func (h *Int8HashTable) lookup(v uint64, szMask uint64, cmp func(int8) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entryInt8
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+ // participate int he probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+func (h *Int8HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entryInt8, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ idx, _ := h.lookup(e.h, newMask, func(int8) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Int8HashTable) Insert(e *entryInt8, v uint64, val int8, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Int8HashTable) VisitEntries(visit func(*entryInt8)) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// Int8MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Int8MemoTable struct {
+ tbl *Int8HashTable
+ nullIdx int32
+}
+
+// NewInt8MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewInt8MemoTable(num int64) *Int8MemoTable {
+ return &Int8MemoTable{tbl: NewInt8HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func (Int8MemoTable) TypeTraits() TypeTraits {
+ return arrow.Int8Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Int8MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Int8MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Int8MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Int8MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *Int8MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *Int8MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]int8))
+}
+
+func (s *Int8MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.Int8Traits.CastFromBytes(out))
+}
+
+func (s *Int8MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.Int8Traits.CastFromBytes(out))
+}
+
+func (s *Int8MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+func (s *Int8MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *Int8MemoTable) Get(val interface{}) (int, bool) {
+
+ h := hashInt(uint64(val.(int8)), 0)
+ if e, ok := s.tbl.Lookup(h, func(v int8) bool { return val.(int8) == v }); ok {
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *Int8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+ h := hashInt(uint64(val.(int8)), 0)
+ e, ok := s.tbl.Lookup(h, func(v int8) bool {
+ return val.(int8) == v
+ })
+
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.(int8), int32(idx))
+ }
+ return
+}
+
+// GetOrInsertBytes is unimplemented
+func (s *Int8MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
+
// payloadUint8 pairs an inserted value with the memo index (insertion order)
// it was assigned when first added to the table.
type payloadUint8 struct {
	val uint8
	memoIdx int32
}

// entryUint8 is a single hash table slot; h == sentinel marks an empty slot.
type entryUint8 struct {
	h uint64
	payload payloadUint8
}

// Valid reports whether this slot holds an inserted entry; fixHash ensures
// no real entry ever stores the sentinel hash value.
func (e entryUint8) Valid() bool { return e.h != sentinel }

// Uint8HashTable is a hashtable specifically for uint8 that
// is utilized with the MemoTable to generalize interactions for easier
// implementation of dictionaries without losing performance.
type Uint8HashTable struct {
	cap uint64     // slot count, always a power of two
	capMask uint64 // cap - 1, masks a hash into a slot index
	size uint64    // number of occupied slots

	entries []entryUint8
}

// NewUint8HashTable returns a new hash table for uint8 values
// initialized with the passed in capacity or 32 whichever is larger.
func NewUint8HashTable(cap uint64) *Uint8HashTable {
	// round up to a power of two so masking with capMask replaces a modulo
	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	ret := &Uint8HashTable{cap: initCap, capMask: initCap - 1, size: 0}
	ret.entries = make([]entryUint8, initCap)
	return ret
}

// Reset drops all of the values in this hash table and re-initializes it
// with the specified initial capacity as if by calling New, but without having
// to reallocate the object.
func (h *Uint8HashTable) Reset(cap uint64) {
	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	h.capMask = h.cap - 1
	h.size = 0
	h.entries = make([]entryUint8, h.cap)
}

// CopyValues is used for copying the values out of the hash table into the
// passed in slice, in the order that they were first inserted
func (h *Uint8HashTable) CopyValues(out []uint8) {
	h.CopyValuesSubset(0, out)
}

// CopyValuesSubset copies a subset of the values in the hashtable out, starting
// with the value at start, in the order that they were inserted.
func (h *Uint8HashTable) CopyValuesSubset(start int, out []uint8) {
	h.VisitEntries(func(e *entryUint8) {
		// entries with memo index < start are skipped
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			out[idx] = e.payload.val
		}
	})
}

// WriteOut writes all values to out as bytes, in insertion order.
func (h *Uint8HashTable) WriteOut(out []byte) {
	h.WriteOutSubset(0, out)
}

// WriteOutSubset writes the values with memo index >= start to out as bytes,
// in insertion order. (Byte order is moot for single-byte values.)
func (h *Uint8HashTable) WriteOutSubset(start int, out []byte) {
	data := arrow.Uint8Traits.CastFromBytes(out)
	h.VisitEntries(func(e *entryUint8) {
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			data[idx] = e.payload.val
		}
	})
}

// needUpsize reports whether occupancy has reached the load-factor threshold
// and the table should grow.
func (h *Uint8HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }

// fixHash remaps the sentinel value (reserved to mark empty slots) to an
// arbitrary non-sentinel hash.
func (Uint8HashTable) fixHash(v uint64) uint64 {
	if v == sentinel {
		return 42
	}
	return v
}

// Lookup retrieves the entry for a given hash value assuming its payload value returns
// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
func (h *Uint8HashTable) Lookup(v uint64, cmp func(uint8) bool) (*entryUint8, bool) {
	idx, ok := h.lookup(v, h.capMask, cmp)
	return &h.entries[idx], ok
}

// lookup probes for hash v under szMask, returning the index of the matching
// entry (true) or of the first empty slot where it could be inserted (false).
func (h *Uint8HashTable) lookup(v uint64, szMask uint64, cmp func(uint8) bool) (uint64, bool) {
	const perturbShift uint8 = 5

	var (
		idx uint64
		perturb uint64
		e *entryUint8
	)

	v = h.fixHash(v)
	idx = v & szMask
	perturb = (v >> uint64(perturbShift)) + 1

	for {
		e = &h.entries[idx]
		if e.h == v && cmp(e.payload.val) {
			return idx, true
		}

		if e.h == sentinel {
			return idx, false
		}

		// perturbation logic inspired from CPython's set/dict object
		// the goal is that all 64 bits of unmasked hash value eventually
		// participate in the probing sequence, to minimize clustering
		idx = (idx + perturb) & szMask
		perturb = (perturb >> uint64(perturbShift)) + 1
	}
}

// upsize rehashes every valid entry into a new slot slice of newcap
// (which must be a power of two).
func (h *Uint8HashTable) upsize(newcap uint64) error {
	newMask := newcap - 1

	oldEntries := h.entries
	h.entries = make([]entryUint8, newcap)
	for _, e := range oldEntries {
		if e.Valid() {
			// cmp always returns false so lookup yields the first empty slot
			idx, _ := h.lookup(e.h, newMask, func(uint8) bool { return false })
			h.entries[idx] = e
		}
	}
	h.cap = newcap
	h.capMask = newMask
	return nil
}

// Insert updates the given entry with the provided hash value, payload value and memo index.
// The entry pointer must have been retrieved via lookup in order to actually insert properly.
func (h *Uint8HashTable) Insert(e *entryUint8, v uint64, val uint8, memoIdx int32) error {
	e.h = h.fixHash(v)
	e.payload.val = val
	e.payload.memoIdx = memoIdx
	h.size++

	if h.needUpsize() {
		h.upsize(h.cap * uint64(loadFactor) * 2)
	}
	return nil
}

// VisitEntries will call the passed in function on each *valid* entry in the hash table,
// a valid entry being one which has had a value inserted into it.
// Note: visit receives a pointer to a loop-local copy, so mutations through
// it do not affect the table.
func (h *Uint8HashTable) VisitEntries(visit func(*entryUint8)) {
	for _, e := range h.entries {
		if e.Valid() {
			visit(&e)
		}
	}
}
+
// Uint8MemoTable is a wrapper over the appropriate hashtable to provide an interface
// conforming to the MemoTable interface defined in the encoding package for general interactions
// regarding dictionaries.
type Uint8MemoTable struct {
	tbl *Uint8HashTable
	nullIdx int32 // memo index of an inserted null, or KeyNotFound if none
}

// NewUint8MemoTable returns a new memotable with num entries pre-allocated to reduce further
// allocations when inserting.
func NewUint8MemoTable(num int64) *Uint8MemoTable {
	return &Uint8MemoTable{tbl: NewUint8HashTable(uint64(num)), nullIdx: KeyNotFound}
}

// TypeTraits returns the type traits for the underlying uint8 values.
func (Uint8MemoTable) TypeTraits() TypeTraits {
	return arrow.Uint8Traits
}

// Reset allows this table to be re-used by dumping all the data currently in the table.
func (s *Uint8MemoTable) Reset() {
	s.tbl.Reset(32)
	s.nullIdx = KeyNotFound
}

// Size returns the current number of inserted elements into the table including if a null
// has been inserted.
func (s *Uint8MemoTable) Size() int {
	sz := int(s.tbl.size)
	if _, ok := s.GetNull(); ok {
		sz++
	}
	return sz
}

// GetNull returns the index of an inserted null or KeyNotFound along with a bool
// that will be true if found and false if not.
func (s *Uint8MemoTable) GetNull() (int, bool) {
	return int(s.nullIdx), s.nullIdx != KeyNotFound
}

// GetOrInsertNull will return the index of the null entry or insert a null entry
// if one currently doesn't exist. The found value will be true if there was already
// a null in the table, and false if it inserted one.
func (s *Uint8MemoTable) GetOrInsertNull() (idx int, found bool) {
	idx, found = s.GetNull()
	if !found {
		idx = s.Size()
		s.nullIdx = int32(idx)
	}
	return
}

// CopyValues will copy the values from the memo table out into the passed in slice
// which must be of the appropriate type.
func (s *Uint8MemoTable) CopyValues(out interface{}) {
	s.CopyValuesSubset(0, out)
}

// CopyValuesSubset is like CopyValues but only copies a subset of values starting
// at the provided start index
func (s *Uint8MemoTable) CopyValuesSubset(start int, out interface{}) {
	s.tbl.CopyValuesSubset(start, out.([]uint8))
}

// WriteOut copies the values, in insertion order, into out in native byte order.
func (s *Uint8MemoTable) WriteOut(out []byte) {
	s.tbl.CopyValues(arrow.Uint8Traits.CastFromBytes(out))
}

// WriteOutSubset is like WriteOut, but only for values with memo index >= start.
func (s *Uint8MemoTable) WriteOutSubset(start int, out []byte) {
	s.tbl.CopyValuesSubset(start, arrow.Uint8Traits.CastFromBytes(out))
}

// WriteOutLE writes the values to out in little-endian order; for single-byte
// values this is identical to WriteOut.
func (s *Uint8MemoTable) WriteOutLE(out []byte) {
	s.tbl.WriteOut(out)
}

// WriteOutSubsetLE is like WriteOutLE but only for values with memo index >= start.
func (s *Uint8MemoTable) WriteOutSubsetLE(start int, out []byte) {
	s.tbl.WriteOutSubset(start, out)
}

// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Uint8MemoTable) Get(val interface{}) (int, bool) {

	h := hashInt(uint64(val.(uint8)), 0)
	if e, ok := s.tbl.Lookup(h, func(v uint8) bool { return val.(uint8) == v }); ok {
		return int(e.payload.memoIdx), ok
	}
	return KeyNotFound, false
}

// GetOrInsert will return the index of the specified value in the table, or insert the
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Uint8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {

	h := hashInt(uint64(val.(uint8)), 0)
	e, ok := s.tbl.Lookup(h, func(v uint8) bool {
		return val.(uint8) == v
	})

	if ok {
		idx = int(e.payload.memoIdx)
		found = true
	} else {
		// new value: its memo index is the current size (insertion order);
		// Insert never fails here so its error is ignored
		idx = s.Size()
		s.tbl.Insert(e, h, val.(uint8), int32(idx))
	}
	return
}

// GetOrInsertBytes is unimplemented for this integer memo table; calling it panics.
func (s *Uint8MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
	panic("unimplemented")
}
+
// payloadInt16 pairs an inserted value with the memo index (insertion order)
// it was assigned when first added to the table.
type payloadInt16 struct {
	val int16
	memoIdx int32
}

// entryInt16 is a single hash table slot; h == sentinel marks an empty slot.
type entryInt16 struct {
	h uint64
	payload payloadInt16
}

// Valid reports whether this slot holds an inserted entry; fixHash ensures
// no real entry ever stores the sentinel hash value.
func (e entryInt16) Valid() bool { return e.h != sentinel }

// Int16HashTable is a hashtable specifically for int16 that
// is utilized with the MemoTable to generalize interactions for easier
// implementation of dictionaries without losing performance.
type Int16HashTable struct {
	cap uint64     // slot count, always a power of two
	capMask uint64 // cap - 1, masks a hash into a slot index
	size uint64    // number of occupied slots

	entries []entryInt16
}

// NewInt16HashTable returns a new hash table for int16 values
// initialized with the passed in capacity or 32 whichever is larger.
func NewInt16HashTable(cap uint64) *Int16HashTable {
	// round up to a power of two so masking with capMask replaces a modulo
	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	ret := &Int16HashTable{cap: initCap, capMask: initCap - 1, size: 0}
	ret.entries = make([]entryInt16, initCap)
	return ret
}

// Reset drops all of the values in this hash table and re-initializes it
// with the specified initial capacity as if by calling New, but without having
// to reallocate the object.
func (h *Int16HashTable) Reset(cap uint64) {
	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	h.capMask = h.cap - 1
	h.size = 0
	h.entries = make([]entryInt16, h.cap)
}

// CopyValues is used for copying the values out of the hash table into the
// passed in slice, in the order that they were first inserted
func (h *Int16HashTable) CopyValues(out []int16) {
	h.CopyValuesSubset(0, out)
}

// CopyValuesSubset copies a subset of the values in the hashtable out, starting
// with the value at start, in the order that they were inserted.
func (h *Int16HashTable) CopyValuesSubset(start int, out []int16) {
	h.VisitEntries(func(e *entryInt16) {
		// entries with memo index < start are skipped
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			out[idx] = e.payload.val
		}
	})
}

// WriteOut writes all values to out as little-endian bytes, in insertion order.
func (h *Int16HashTable) WriteOut(out []byte) {
	h.WriteOutSubset(0, out)
}

// WriteOutSubset writes the values with memo index >= start to out as
// little-endian bytes, in insertion order.
func (h *Int16HashTable) WriteOutSubset(start int, out []byte) {
	data := arrow.Int16Traits.CastFromBytes(out)
	h.VisitEntries(func(e *entryInt16) {
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			data[idx] = utils.ToLEInt16(e.payload.val)
		}
	})
}

// needUpsize reports whether occupancy has reached the load-factor threshold
// and the table should grow.
func (h *Int16HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }

// fixHash remaps the sentinel value (reserved to mark empty slots) to an
// arbitrary non-sentinel hash.
func (Int16HashTable) fixHash(v uint64) uint64 {
	if v == sentinel {
		return 42
	}
	return v
}

// Lookup retrieves the entry for a given hash value assuming its payload value returns
// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
func (h *Int16HashTable) Lookup(v uint64, cmp func(int16) bool) (*entryInt16, bool) {
	idx, ok := h.lookup(v, h.capMask, cmp)
	return &h.entries[idx], ok
}

// lookup probes for hash v under szMask, returning the index of the matching
// entry (true) or of the first empty slot where it could be inserted (false).
func (h *Int16HashTable) lookup(v uint64, szMask uint64, cmp func(int16) bool) (uint64, bool) {
	const perturbShift uint8 = 5

	var (
		idx uint64
		perturb uint64
		e *entryInt16
	)

	v = h.fixHash(v)
	idx = v & szMask
	perturb = (v >> uint64(perturbShift)) + 1

	for {
		e = &h.entries[idx]
		if e.h == v && cmp(e.payload.val) {
			return idx, true
		}

		if e.h == sentinel {
			return idx, false
		}

		// perturbation logic inspired from CPython's set/dict object
		// the goal is that all 64 bits of unmasked hash value eventually
		// participate in the probing sequence, to minimize clustering
		idx = (idx + perturb) & szMask
		perturb = (perturb >> uint64(perturbShift)) + 1
	}
}

// upsize rehashes every valid entry into a new slot slice of newcap
// (which must be a power of two).
func (h *Int16HashTable) upsize(newcap uint64) error {
	newMask := newcap - 1

	oldEntries := h.entries
	h.entries = make([]entryInt16, newcap)
	for _, e := range oldEntries {
		if e.Valid() {
			// cmp always returns false so lookup yields the first empty slot
			idx, _ := h.lookup(e.h, newMask, func(int16) bool { return false })
			h.entries[idx] = e
		}
	}
	h.cap = newcap
	h.capMask = newMask
	return nil
}

// Insert updates the given entry with the provided hash value, payload value and memo index.
// The entry pointer must have been retrieved via lookup in order to actually insert properly.
func (h *Int16HashTable) Insert(e *entryInt16, v uint64, val int16, memoIdx int32) error {
	e.h = h.fixHash(v)
	e.payload.val = val
	e.payload.memoIdx = memoIdx
	h.size++

	if h.needUpsize() {
		h.upsize(h.cap * uint64(loadFactor) * 2)
	}
	return nil
}

// VisitEntries will call the passed in function on each *valid* entry in the hash table,
// a valid entry being one which has had a value inserted into it.
// Note: visit receives a pointer to a loop-local copy, so mutations through
// it do not affect the table.
func (h *Int16HashTable) VisitEntries(visit func(*entryInt16)) {
	for _, e := range h.entries {
		if e.Valid() {
			visit(&e)
		}
	}
}
+
// Int16MemoTable is a wrapper over the appropriate hashtable to provide an interface
// conforming to the MemoTable interface defined in the encoding package for general interactions
// regarding dictionaries.
type Int16MemoTable struct {
	tbl *Int16HashTable
	nullIdx int32 // memo index of an inserted null, or KeyNotFound if none
}

// NewInt16MemoTable returns a new memotable with num entries pre-allocated to reduce further
// allocations when inserting.
func NewInt16MemoTable(num int64) *Int16MemoTable {
	return &Int16MemoTable{tbl: NewInt16HashTable(uint64(num)), nullIdx: KeyNotFound}
}

// TypeTraits returns the type traits for the underlying int16 values.
func (Int16MemoTable) TypeTraits() TypeTraits {
	return arrow.Int16Traits
}

// Reset allows this table to be re-used by dumping all the data currently in the table.
func (s *Int16MemoTable) Reset() {
	s.tbl.Reset(32)
	s.nullIdx = KeyNotFound
}

// Size returns the current number of inserted elements into the table including if a null
// has been inserted.
func (s *Int16MemoTable) Size() int {
	sz := int(s.tbl.size)
	if _, ok := s.GetNull(); ok {
		sz++
	}
	return sz
}

// GetNull returns the index of an inserted null or KeyNotFound along with a bool
// that will be true if found and false if not.
func (s *Int16MemoTable) GetNull() (int, bool) {
	return int(s.nullIdx), s.nullIdx != KeyNotFound
}

// GetOrInsertNull will return the index of the null entry or insert a null entry
// if one currently doesn't exist. The found value will be true if there was already
// a null in the table, and false if it inserted one.
func (s *Int16MemoTable) GetOrInsertNull() (idx int, found bool) {
	idx, found = s.GetNull()
	if !found {
		idx = s.Size()
		s.nullIdx = int32(idx)
	}
	return
}

// CopyValues will copy the values from the memo table out into the passed in slice
// which must be of the appropriate type.
func (s *Int16MemoTable) CopyValues(out interface{}) {
	s.CopyValuesSubset(0, out)
}

// CopyValuesSubset is like CopyValues but only copies a subset of values starting
// at the provided start index
func (s *Int16MemoTable) CopyValuesSubset(start int, out interface{}) {
	s.tbl.CopyValuesSubset(start, out.([]int16))
}

// WriteOut copies the values, in insertion order, into out in native byte order.
func (s *Int16MemoTable) WriteOut(out []byte) {
	s.tbl.CopyValues(arrow.Int16Traits.CastFromBytes(out))
}

// WriteOutSubset is like WriteOut, but only for values with memo index >= start.
func (s *Int16MemoTable) WriteOutSubset(start int, out []byte) {
	s.tbl.CopyValuesSubset(start, arrow.Int16Traits.CastFromBytes(out))
}

// WriteOutLE writes the values to out in little-endian byte order.
func (s *Int16MemoTable) WriteOutLE(out []byte) {
	s.tbl.WriteOut(out)
}

// WriteOutSubsetLE is like WriteOutLE but only for values with memo index >= start.
func (s *Int16MemoTable) WriteOutSubsetLE(start int, out []byte) {
	s.tbl.WriteOutSubset(start, out)
}

// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Int16MemoTable) Get(val interface{}) (int, bool) {

	h := hashInt(uint64(val.(int16)), 0)
	if e, ok := s.tbl.Lookup(h, func(v int16) bool { return val.(int16) == v }); ok {
		return int(e.payload.memoIdx), ok
	}
	return KeyNotFound, false
}

// GetOrInsert will return the index of the specified value in the table, or insert the
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Int16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {

	h := hashInt(uint64(val.(int16)), 0)
	e, ok := s.tbl.Lookup(h, func(v int16) bool {
		return val.(int16) == v
	})

	if ok {
		idx = int(e.payload.memoIdx)
		found = true
	} else {
		// new value: its memo index is the current size (insertion order);
		// Insert never fails here so its error is ignored
		idx = s.Size()
		s.tbl.Insert(e, h, val.(int16), int32(idx))
	}
	return
}

// GetOrInsertBytes is unimplemented for this integer memo table; calling it panics.
func (s *Int16MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
	panic("unimplemented")
}
+
// payloadUint16 pairs an inserted value with the memo index (insertion order)
// it was assigned when first added to the table.
type payloadUint16 struct {
	val uint16
	memoIdx int32
}

// entryUint16 is a single hash table slot; h == sentinel marks an empty slot.
type entryUint16 struct {
	h uint64
	payload payloadUint16
}

// Valid reports whether this slot holds an inserted entry; fixHash ensures
// no real entry ever stores the sentinel hash value.
func (e entryUint16) Valid() bool { return e.h != sentinel }

// Uint16HashTable is a hashtable specifically for uint16 that
// is utilized with the MemoTable to generalize interactions for easier
// implementation of dictionaries without losing performance.
type Uint16HashTable struct {
	cap uint64     // slot count, always a power of two
	capMask uint64 // cap - 1, masks a hash into a slot index
	size uint64    // number of occupied slots

	entries []entryUint16
}

// NewUint16HashTable returns a new hash table for uint16 values
// initialized with the passed in capacity or 32 whichever is larger.
func NewUint16HashTable(cap uint64) *Uint16HashTable {
	// round up to a power of two so masking with capMask replaces a modulo
	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	ret := &Uint16HashTable{cap: initCap, capMask: initCap - 1, size: 0}
	ret.entries = make([]entryUint16, initCap)
	return ret
}

// Reset drops all of the values in this hash table and re-initializes it
// with the specified initial capacity as if by calling New, but without having
// to reallocate the object.
func (h *Uint16HashTable) Reset(cap uint64) {
	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	h.capMask = h.cap - 1
	h.size = 0
	h.entries = make([]entryUint16, h.cap)
}

// CopyValues is used for copying the values out of the hash table into the
// passed in slice, in the order that they were first inserted
func (h *Uint16HashTable) CopyValues(out []uint16) {
	h.CopyValuesSubset(0, out)
}

// CopyValuesSubset copies a subset of the values in the hashtable out, starting
// with the value at start, in the order that they were inserted.
func (h *Uint16HashTable) CopyValuesSubset(start int, out []uint16) {
	h.VisitEntries(func(e *entryUint16) {
		// entries with memo index < start are skipped
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			out[idx] = e.payload.val
		}
	})
}

// WriteOut writes all values to out as little-endian bytes, in insertion order.
func (h *Uint16HashTable) WriteOut(out []byte) {
	h.WriteOutSubset(0, out)
}

// WriteOutSubset writes the values with memo index >= start to out as
// little-endian bytes, in insertion order.
func (h *Uint16HashTable) WriteOutSubset(start int, out []byte) {
	data := arrow.Uint16Traits.CastFromBytes(out)
	h.VisitEntries(func(e *entryUint16) {
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			data[idx] = utils.ToLEUint16(e.payload.val)
		}
	})
}

// needUpsize reports whether occupancy has reached the load-factor threshold
// and the table should grow.
func (h *Uint16HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }

// fixHash remaps the sentinel value (reserved to mark empty slots) to an
// arbitrary non-sentinel hash.
func (Uint16HashTable) fixHash(v uint64) uint64 {
	if v == sentinel {
		return 42
	}
	return v
}

// Lookup retrieves the entry for a given hash value assuming its payload value returns
// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
func (h *Uint16HashTable) Lookup(v uint64, cmp func(uint16) bool) (*entryUint16, bool) {
	idx, ok := h.lookup(v, h.capMask, cmp)
	return &h.entries[idx], ok
}

// lookup probes for hash v under szMask, returning the index of the matching
// entry (true) or of the first empty slot where it could be inserted (false).
func (h *Uint16HashTable) lookup(v uint64, szMask uint64, cmp func(uint16) bool) (uint64, bool) {
	const perturbShift uint8 = 5

	var (
		idx uint64
		perturb uint64
		e *entryUint16
	)

	v = h.fixHash(v)
	idx = v & szMask
	perturb = (v >> uint64(perturbShift)) + 1

	for {
		e = &h.entries[idx]
		if e.h == v && cmp(e.payload.val) {
			return idx, true
		}

		if e.h == sentinel {
			return idx, false
		}

		// perturbation logic inspired from CPython's set/dict object
		// the goal is that all 64 bits of unmasked hash value eventually
		// participate in the probing sequence, to minimize clustering
		idx = (idx + perturb) & szMask
		perturb = (perturb >> uint64(perturbShift)) + 1
	}
}

// upsize rehashes every valid entry into a new slot slice of newcap
// (which must be a power of two).
func (h *Uint16HashTable) upsize(newcap uint64) error {
	newMask := newcap - 1

	oldEntries := h.entries
	h.entries = make([]entryUint16, newcap)
	for _, e := range oldEntries {
		if e.Valid() {
			// cmp always returns false so lookup yields the first empty slot
			idx, _ := h.lookup(e.h, newMask, func(uint16) bool { return false })
			h.entries[idx] = e
		}
	}
	h.cap = newcap
	h.capMask = newMask
	return nil
}

// Insert updates the given entry with the provided hash value, payload value and memo index.
// The entry pointer must have been retrieved via lookup in order to actually insert properly.
func (h *Uint16HashTable) Insert(e *entryUint16, v uint64, val uint16, memoIdx int32) error {
	e.h = h.fixHash(v)
	e.payload.val = val
	e.payload.memoIdx = memoIdx
	h.size++

	if h.needUpsize() {
		h.upsize(h.cap * uint64(loadFactor) * 2)
	}
	return nil
}

// VisitEntries will call the passed in function on each *valid* entry in the hash table,
// a valid entry being one which has had a value inserted into it.
// Note: visit receives a pointer to a loop-local copy, so mutations through
// it do not affect the table.
func (h *Uint16HashTable) VisitEntries(visit func(*entryUint16)) {
	for _, e := range h.entries {
		if e.Valid() {
			visit(&e)
		}
	}
}
+
// Uint16MemoTable is a wrapper over the appropriate hashtable to provide an interface
// conforming to the MemoTable interface defined in the encoding package for general interactions
// regarding dictionaries.
type Uint16MemoTable struct {
	tbl *Uint16HashTable
	nullIdx int32 // memo index of an inserted null, or KeyNotFound if none
}

// NewUint16MemoTable returns a new memotable with num entries pre-allocated to reduce further
// allocations when inserting.
func NewUint16MemoTable(num int64) *Uint16MemoTable {
	return &Uint16MemoTable{tbl: NewUint16HashTable(uint64(num)), nullIdx: KeyNotFound}
}

// TypeTraits returns the type traits for the underlying uint16 values.
func (Uint16MemoTable) TypeTraits() TypeTraits {
	return arrow.Uint16Traits
}

// Reset allows this table to be re-used by dumping all the data currently in the table.
func (s *Uint16MemoTable) Reset() {
	s.tbl.Reset(32)
	s.nullIdx = KeyNotFound
}

// Size returns the current number of inserted elements into the table including if a null
// has been inserted.
func (s *Uint16MemoTable) Size() int {
	sz := int(s.tbl.size)
	if _, ok := s.GetNull(); ok {
		sz++
	}
	return sz
}

// GetNull returns the index of an inserted null or KeyNotFound along with a bool
// that will be true if found and false if not.
func (s *Uint16MemoTable) GetNull() (int, bool) {
	return int(s.nullIdx), s.nullIdx != KeyNotFound
}

// GetOrInsertNull will return the index of the null entry or insert a null entry
// if one currently doesn't exist. The found value will be true if there was already
// a null in the table, and false if it inserted one.
func (s *Uint16MemoTable) GetOrInsertNull() (idx int, found bool) {
	idx, found = s.GetNull()
	if !found {
		idx = s.Size()
		s.nullIdx = int32(idx)
	}
	return
}

// CopyValues will copy the values from the memo table out into the passed in slice
// which must be of the appropriate type.
func (s *Uint16MemoTable) CopyValues(out interface{}) {
	s.CopyValuesSubset(0, out)
}

// CopyValuesSubset is like CopyValues but only copies a subset of values starting
// at the provided start index
func (s *Uint16MemoTable) CopyValuesSubset(start int, out interface{}) {
	s.tbl.CopyValuesSubset(start, out.([]uint16))
}

// WriteOut copies the values, in insertion order, into out in native byte order.
func (s *Uint16MemoTable) WriteOut(out []byte) {
	s.tbl.CopyValues(arrow.Uint16Traits.CastFromBytes(out))
}

// WriteOutSubset is like WriteOut, but only for values with memo index >= start.
func (s *Uint16MemoTable) WriteOutSubset(start int, out []byte) {
	s.tbl.CopyValuesSubset(start, arrow.Uint16Traits.CastFromBytes(out))
}

// WriteOutLE writes the values to out in little-endian byte order.
func (s *Uint16MemoTable) WriteOutLE(out []byte) {
	s.tbl.WriteOut(out)
}

// WriteOutSubsetLE is like WriteOutLE but only for values with memo index >= start.
func (s *Uint16MemoTable) WriteOutSubsetLE(start int, out []byte) {
	s.tbl.WriteOutSubset(start, out)
}

// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Uint16MemoTable) Get(val interface{}) (int, bool) {

	h := hashInt(uint64(val.(uint16)), 0)
	if e, ok := s.tbl.Lookup(h, func(v uint16) bool { return val.(uint16) == v }); ok {
		return int(e.payload.memoIdx), ok
	}
	return KeyNotFound, false
}

// GetOrInsert will return the index of the specified value in the table, or insert the
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Uint16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {

	h := hashInt(uint64(val.(uint16)), 0)
	e, ok := s.tbl.Lookup(h, func(v uint16) bool {
		return val.(uint16) == v
	})

	if ok {
		idx = int(e.payload.memoIdx)
		found = true
	} else {
		// new value: its memo index is the current size (insertion order);
		// Insert never fails here so its error is ignored
		idx = s.Size()
		s.tbl.Insert(e, h, val.(uint16), int32(idx))
	}
	return
}

// GetOrInsertBytes is unimplemented for this integer memo table; calling it panics.
func (s *Uint16MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
	panic("unimplemented")
}
+
// payloadInt32 pairs an inserted value with the memo index (insertion order)
// it was assigned when first added to the table.
type payloadInt32 struct {
	val int32
	memoIdx int32
}

// entryInt32 is a single hash table slot; h == sentinel marks an empty slot.
type entryInt32 struct {
	h uint64
	payload payloadInt32
}

// Valid reports whether this slot holds an inserted entry; fixHash ensures
// no real entry ever stores the sentinel hash value.
func (e entryInt32) Valid() bool { return e.h != sentinel }

// Int32HashTable is a hashtable specifically for int32 that
// is utilized with the MemoTable to generalize interactions for easier
// implementation of dictionaries without losing performance.
type Int32HashTable struct {
	cap uint64     // slot count, always a power of two
	capMask uint64 // cap - 1, masks a hash into a slot index
	size uint64    // number of occupied slots

	entries []entryInt32
}

// NewInt32HashTable returns a new hash table for int32 values
// initialized with the passed in capacity or 32 whichever is larger.
func NewInt32HashTable(cap uint64) *Int32HashTable {
	// round up to a power of two so masking with capMask replaces a modulo
	initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	ret := &Int32HashTable{cap: initCap, capMask: initCap - 1, size: 0}
	ret.entries = make([]entryInt32, initCap)
	return ret
}

// Reset drops all of the values in this hash table and re-initializes it
// with the specified initial capacity as if by calling New, but without having
// to reallocate the object.
func (h *Int32HashTable) Reset(cap uint64) {
	h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
	h.capMask = h.cap - 1
	h.size = 0
	h.entries = make([]entryInt32, h.cap)
}

// CopyValues is used for copying the values out of the hash table into the
// passed in slice, in the order that they were first inserted
func (h *Int32HashTable) CopyValues(out []int32) {
	h.CopyValuesSubset(0, out)
}

// CopyValuesSubset copies a subset of the values in the hashtable out, starting
// with the value at start, in the order that they were inserted.
func (h *Int32HashTable) CopyValuesSubset(start int, out []int32) {
	h.VisitEntries(func(e *entryInt32) {
		// entries with memo index < start are skipped
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			out[idx] = e.payload.val
		}
	})
}

// WriteOut writes all values to out as little-endian bytes, in insertion order.
func (h *Int32HashTable) WriteOut(out []byte) {
	h.WriteOutSubset(0, out)
}

// WriteOutSubset writes the values with memo index >= start to out as
// little-endian bytes, in insertion order.
func (h *Int32HashTable) WriteOutSubset(start int, out []byte) {
	data := arrow.Int32Traits.CastFromBytes(out)
	h.VisitEntries(func(e *entryInt32) {
		idx := e.payload.memoIdx - int32(start)
		if idx >= 0 {
			data[idx] = utils.ToLEInt32(e.payload.val)
		}
	})
}

// needUpsize reports whether occupancy has reached the load-factor threshold
// and the table should grow.
func (h *Int32HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }

// fixHash remaps the sentinel value (reserved to mark empty slots) to an
// arbitrary non-sentinel hash.
func (Int32HashTable) fixHash(v uint64) uint64 {
	if v == sentinel {
		return 42
	}
	return v
}

// Lookup retrieves the entry for a given hash value assuming its payload value returns
// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
func (h *Int32HashTable) Lookup(v uint64, cmp func(int32) bool) (*entryInt32, bool) {
	idx, ok := h.lookup(v, h.capMask, cmp)
	return &h.entries[idx], ok
}

// lookup probes for hash v under szMask, returning the index of the matching
// entry (true) or of the first empty slot where it could be inserted (false).
func (h *Int32HashTable) lookup(v uint64, szMask uint64, cmp func(int32) bool) (uint64, bool) {
	const perturbShift uint8 = 5

	var (
		idx uint64
		perturb uint64
		e *entryInt32
	)

	v = h.fixHash(v)
	idx = v & szMask
	perturb = (v >> uint64(perturbShift)) + 1

	for {
		e = &h.entries[idx]
		if e.h == v && cmp(e.payload.val) {
			return idx, true
		}

		if e.h == sentinel {
			return idx, false
		}

		// perturbation logic inspired from CPython's set/dict object
		// the goal is that all 64 bits of unmasked hash value eventually
		// participate in the probing sequence, to minimize clustering
		idx = (idx + perturb) & szMask
		perturb = (perturb >> uint64(perturbShift)) + 1
	}
}

// upsize rehashes every valid entry into a new slot slice of newcap
// (which must be a power of two).
func (h *Int32HashTable) upsize(newcap uint64) error {
	newMask := newcap - 1

	oldEntries := h.entries
	h.entries = make([]entryInt32, newcap)
	for _, e := range oldEntries {
		if e.Valid() {
			// cmp always returns false so lookup yields the first empty slot
			idx, _ := h.lookup(e.h, newMask, func(int32) bool { return false })
			h.entries[idx] = e
		}
	}
	h.cap = newcap
	h.capMask = newMask
	return nil
}

// Insert updates the given entry with the provided hash value, payload value and memo index.
// The entry pointer must have been retrieved via lookup in order to actually insert properly.
func (h *Int32HashTable) Insert(e *entryInt32, v uint64, val int32, memoIdx int32) error {
	e.h = h.fixHash(v)
	e.payload.val = val
	e.payload.memoIdx = memoIdx
	h.size++

	if h.needUpsize() {
		h.upsize(h.cap * uint64(loadFactor) * 2)
	}
	return nil
}

// VisitEntries will call the passed in function on each *valid* entry in the hash table,
// a valid entry being one which has had a value inserted into it.
// Note: visit receives a pointer to a loop-local copy, so mutations through
// it do not affect the table.
func (h *Int32HashTable) VisitEntries(visit func(*entryInt32)) {
	for _, e := range h.entries {
		if e.Valid() {
			visit(&e)
		}
	}
}
+
// Int32MemoTable is a wrapper over the appropriate hashtable to provide an interface
// conforming to the MemoTable interface defined in the encoding package for general interactions
// regarding dictionaries.
type Int32MemoTable struct {
	tbl *Int32HashTable
	nullIdx int32 // memo index of an inserted null, or KeyNotFound if none
}

// NewInt32MemoTable returns a new memotable with num entries pre-allocated to reduce further
// allocations when inserting.
func NewInt32MemoTable(num int64) *Int32MemoTable {
	return &Int32MemoTable{tbl: NewInt32HashTable(uint64(num)), nullIdx: KeyNotFound}
}

// TypeTraits returns the type traits for the underlying int32 values.
func (Int32MemoTable) TypeTraits() TypeTraits {
	return arrow.Int32Traits
}

// Reset allows this table to be re-used by dumping all the data currently in the table.
func (s *Int32MemoTable) Reset() {
	s.tbl.Reset(32)
	s.nullIdx = KeyNotFound
}

// Size returns the current number of inserted elements into the table including if a null
// has been inserted.
func (s *Int32MemoTable) Size() int {
	sz := int(s.tbl.size)
	if _, ok := s.GetNull(); ok {
		sz++
	}
	return sz
}

// GetNull returns the index of an inserted null or KeyNotFound along with a bool
// that will be true if found and false if not.
func (s *Int32MemoTable) GetNull() (int, bool) {
	return int(s.nullIdx), s.nullIdx != KeyNotFound
}

// GetOrInsertNull will return the index of the null entry or insert a null entry
// if one currently doesn't exist. The found value will be true if there was already
// a null in the table, and false if it inserted one.
func (s *Int32MemoTable) GetOrInsertNull() (idx int, found bool) {
	idx, found = s.GetNull()
	if !found {
		idx = s.Size()
		s.nullIdx = int32(idx)
	}
	return
}

// CopyValues will copy the values from the memo table out into the passed in slice
// which must be of the appropriate type.
func (s *Int32MemoTable) CopyValues(out interface{}) {
	s.CopyValuesSubset(0, out)
}

// CopyValuesSubset is like CopyValues but only copies a subset of values starting
// at the provided start index
func (s *Int32MemoTable) CopyValuesSubset(start int, out interface{}) {
	s.tbl.CopyValuesSubset(start, out.([]int32))
}

// WriteOut copies the values, in insertion order, into out in native byte order.
func (s *Int32MemoTable) WriteOut(out []byte) {
	s.tbl.CopyValues(arrow.Int32Traits.CastFromBytes(out))
}

// WriteOutSubset is like WriteOut, but only for values with memo index >= start.
func (s *Int32MemoTable) WriteOutSubset(start int, out []byte) {
	s.tbl.CopyValuesSubset(start, arrow.Int32Traits.CastFromBytes(out))
}

// WriteOutLE writes the values to out in little-endian byte order.
func (s *Int32MemoTable) WriteOutLE(out []byte) {
	s.tbl.WriteOut(out)
}

// WriteOutSubsetLE is like WriteOutLE but only for values with memo index >= start.
func (s *Int32MemoTable) WriteOutSubsetLE(start int, out []byte) {
	s.tbl.WriteOutSubset(start, out)
}

// Get returns the index of the requested value in the hash table or KeyNotFound
// along with a boolean indicating if it was found or not.
func (s *Int32MemoTable) Get(val interface{}) (int, bool) {

	h := hashInt(uint64(val.(int32)), 0)
	if e, ok := s.tbl.Lookup(h, func(v int32) bool { return val.(int32) == v }); ok {
		return int(e.payload.memoIdx), ok
	}
	return KeyNotFound, false
}

// GetOrInsert will return the index of the specified value in the table, or insert the
// value into the table and return the new index. found indicates whether or not it already
// existed in the table (true) or was inserted by this call (false).
func (s *Int32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {

	h := hashInt(uint64(val.(int32)), 0)
	e, ok := s.tbl.Lookup(h, func(v int32) bool {
		return val.(int32) == v
	})

	if ok {
		idx = int(e.payload.memoIdx)
		found = true
	} else {
		// new value: its memo index is the current size (insertion order);
		// Insert never fails here so its error is ignored
		idx = s.Size()
		s.tbl.Insert(e, h, val.(int32), int32(idx))
	}
	return
}

// GetOrInsertBytes is unimplemented for this integer memo table; calling it panics.
func (s *Int32MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
	panic("unimplemented")
}
+
+// payloadInt64 pairs an inserted value with the memo index it was assigned.
+type payloadInt64 struct {
+ val int64
+ memoIdx int32
+}
+
+// entryInt64 is a single hash table slot: the (fixed-up) hash and its payload.
+type entryInt64 struct {
+ h uint64
+ payload payloadInt64
+}
+
+// Valid reports whether this slot holds an inserted value, i.e. its hash is
+// not the empty-slot sentinel.
+func (e entryInt64) Valid() bool { return e.h != sentinel }
+
+// Int64HashTable is a hashtable specifically for int64 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Int64HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entryInt64
+}
+
+// NewInt64HashTable returns a new hash table for int64 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewInt64HashTable(cap uint64) *Int64HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &Int64HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entryInt64, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Int64HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entryInt64, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Int64HashTable) CopyValues(out []int64) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Int64HashTable) CopyValuesSubset(start int, out []int64) {
+ h.VisitEntries(func(e *entryInt64) {
+ // entries inserted before start come out negative and are skipped
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+// WriteOut writes the values to out, in insertion order, as little-endian bytes.
+func (h *Int64HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+// WriteOutSubset writes the values at memo index start and above to out,
+// in insertion order, as little-endian bytes.
+func (h *Int64HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.Int64Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entryInt64) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ data[idx] = utils.ToLEInt64(e.payload.val)
+ }
+ })
+}
+
+// needUpsize reports whether the load factor has been exceeded and the table must grow.
+func (h *Int64HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+// fixHash remaps the sentinel value (which marks empty slots) to an arbitrary
+// non-sentinel hash so that a real entry can never look empty.
+func (Int64HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Int64HashTable) Lookup(v uint64, cmp func(int64) bool) (*entryInt64, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+// lookup probes for hash v under the capacity mask szMask, returning the index
+// of the matching slot (true) or of the first empty slot found (false).
+func (h *Int64HashTable) lookup(v uint64, szMask uint64, cmp func(int64) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entryInt64
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+ // participate in the probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+// upsize rehashes every valid entry into a freshly allocated table of capacity
+// newcap (expected to be a power of two, since newcap-1 is used as the probe mask).
+func (h *Int64HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entryInt64, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ // cmp always false: we only want the first empty slot for this hash
+ idx, _ := h.lookup(e.h, newMask, func(int64) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Int64HashTable) Insert(e *entryInt64, v uint64, val int64, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Int64HashTable) VisitEntries(visit func(*entryInt64)) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// Int64MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Int64MemoTable struct {
+ tbl *Int64HashTable
+ nullIdx int32
+}
+
+// NewInt64MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewInt64MemoTable(num int64) *Int64MemoTable {
+ return &Int64MemoTable{tbl: NewInt64HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+// TypeTraits returns the type traits from the arrow package for int64.
+func (Int64MemoTable) TypeTraits() TypeTraits {
+ return arrow.Int64Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Int64MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Int64MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Int64MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Int64MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ // null gets the next memo index; it is tracked separately from the hash table
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *Int64MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *Int64MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]int64))
+}
+
+// WriteOut reinterprets out as []int64 and copies the values into it in
+// insertion order (native byte order).
+func (s *Int64MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.Int64Traits.CastFromBytes(out))
+}
+
+// WriteOutSubset is like WriteOut but only writes the values at memo index
+// start and above.
+func (s *Int64MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.Int64Traits.CastFromBytes(out))
+}
+
+// WriteOutLE writes the values to out in insertion order as little-endian bytes.
+func (s *Int64MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+// WriteOutSubsetLE is like WriteOutLE but only writes the values at memo
+// index start and above.
+func (s *Int64MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *Int64MemoTable) Get(val interface{}) (int, bool) {
+
+ h := hashInt(uint64(val.(int64)), 0)
+ if e, ok := s.tbl.Lookup(h, func(v int64) bool { return val.(int64) == v }); ok {
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *Int64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+ h := hashInt(uint64(val.(int64)), 0)
+ e, ok := s.tbl.Lookup(h, func(v int64) bool {
+ return val.(int64) == v
+ })
+
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ // e points at the empty slot returned by Lookup; insert val there
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.(int64), int32(idx))
+ }
+ return
+}
+
+// GetOrInsertBytes is unimplemented
+func (s *Int64MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
+
+// payloadUint32 pairs an inserted value with the memo index it was assigned.
+type payloadUint32 struct {
+ val uint32
+ memoIdx int32
+}
+
+// entryUint32 is a single hash table slot: the (fixed-up) hash and its payload.
+type entryUint32 struct {
+ h uint64
+ payload payloadUint32
+}
+
+// Valid reports whether this slot holds an inserted value, i.e. its hash is
+// not the empty-slot sentinel.
+func (e entryUint32) Valid() bool { return e.h != sentinel }
+
+// Uint32HashTable is a hashtable specifically for uint32 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Uint32HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entryUint32
+}
+
+// NewUint32HashTable returns a new hash table for uint32 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewUint32HashTable(cap uint64) *Uint32HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &Uint32HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entryUint32, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Uint32HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entryUint32, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Uint32HashTable) CopyValues(out []uint32) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Uint32HashTable) CopyValuesSubset(start int, out []uint32) {
+ h.VisitEntries(func(e *entryUint32) {
+ // entries inserted before start come out negative and are skipped
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+// WriteOut writes the values to out, in insertion order, as little-endian bytes.
+func (h *Uint32HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+// WriteOutSubset writes the values at memo index start and above to out,
+// in insertion order, as little-endian bytes.
+func (h *Uint32HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.Uint32Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entryUint32) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ data[idx] = utils.ToLEUint32(e.payload.val)
+ }
+ })
+}
+
+// needUpsize reports whether the load factor has been exceeded and the table must grow.
+func (h *Uint32HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+// fixHash remaps the sentinel value (which marks empty slots) to an arbitrary
+// non-sentinel hash so that a real entry can never look empty.
+func (Uint32HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Uint32HashTable) Lookup(v uint64, cmp func(uint32) bool) (*entryUint32, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+// lookup probes for hash v under the capacity mask szMask, returning the index
+// of the matching slot (true) or of the first empty slot found (false).
+func (h *Uint32HashTable) lookup(v uint64, szMask uint64, cmp func(uint32) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entryUint32
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+ // participate in the probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+// upsize rehashes every valid entry into a freshly allocated table of capacity
+// newcap (expected to be a power of two, since newcap-1 is used as the probe mask).
+func (h *Uint32HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entryUint32, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ // cmp always false: we only want the first empty slot for this hash
+ idx, _ := h.lookup(e.h, newMask, func(uint32) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Uint32HashTable) Insert(e *entryUint32, v uint64, val uint32, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Uint32HashTable) VisitEntries(visit func(*entryUint32)) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// Uint32MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Uint32MemoTable struct {
+ tbl *Uint32HashTable
+ nullIdx int32
+}
+
+// NewUint32MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewUint32MemoTable(num int64) *Uint32MemoTable {
+ return &Uint32MemoTable{tbl: NewUint32HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+// TypeTraits returns the type traits from the arrow package for uint32.
+func (Uint32MemoTable) TypeTraits() TypeTraits {
+ return arrow.Uint32Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Uint32MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Uint32MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Uint32MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Uint32MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ // null gets the next memo index; it is tracked separately from the hash table
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *Uint32MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *Uint32MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]uint32))
+}
+
+// WriteOut reinterprets out as []uint32 and copies the values into it in
+// insertion order (native byte order).
+func (s *Uint32MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.Uint32Traits.CastFromBytes(out))
+}
+
+// WriteOutSubset is like WriteOut but only writes the values at memo index
+// start and above.
+func (s *Uint32MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.Uint32Traits.CastFromBytes(out))
+}
+
+// WriteOutLE writes the values to out in insertion order as little-endian bytes.
+func (s *Uint32MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+// WriteOutSubsetLE is like WriteOutLE but only writes the values at memo
+// index start and above.
+func (s *Uint32MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *Uint32MemoTable) Get(val interface{}) (int, bool) {
+
+ h := hashInt(uint64(val.(uint32)), 0)
+ if e, ok := s.tbl.Lookup(h, func(v uint32) bool { return val.(uint32) == v }); ok {
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *Uint32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+ h := hashInt(uint64(val.(uint32)), 0)
+ e, ok := s.tbl.Lookup(h, func(v uint32) bool {
+ return val.(uint32) == v
+ })
+
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ // e points at the empty slot returned by Lookup; insert val there
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.(uint32), int32(idx))
+ }
+ return
+}
+
+// GetOrInsertBytes is unimplemented
+func (s *Uint32MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
+
+// payloadUint64 pairs an inserted value with the memo index it was assigned.
+type payloadUint64 struct {
+ val uint64
+ memoIdx int32
+}
+
+// entryUint64 is a single hash table slot: the (fixed-up) hash and its payload.
+type entryUint64 struct {
+ h uint64
+ payload payloadUint64
+}
+
+// Valid reports whether this slot holds an inserted value, i.e. its hash is
+// not the empty-slot sentinel.
+func (e entryUint64) Valid() bool { return e.h != sentinel }
+
+// Uint64HashTable is a hashtable specifically for uint64 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Uint64HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entryUint64
+}
+
+// NewUint64HashTable returns a new hash table for uint64 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewUint64HashTable(cap uint64) *Uint64HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &Uint64HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entryUint64, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Uint64HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entryUint64, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Uint64HashTable) CopyValues(out []uint64) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Uint64HashTable) CopyValuesSubset(start int, out []uint64) {
+ h.VisitEntries(func(e *entryUint64) {
+ // entries inserted before start come out negative and are skipped
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+// WriteOut writes the values to out, in insertion order, as little-endian bytes.
+func (h *Uint64HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+// WriteOutSubset writes the values at memo index start and above to out,
+// in insertion order, as little-endian bytes.
+func (h *Uint64HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.Uint64Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entryUint64) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ data[idx] = utils.ToLEUint64(e.payload.val)
+ }
+ })
+}
+
+// needUpsize reports whether the load factor has been exceeded and the table must grow.
+func (h *Uint64HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+// fixHash remaps the sentinel value (which marks empty slots) to an arbitrary
+// non-sentinel hash so that a real entry can never look empty.
+func (Uint64HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Uint64HashTable) Lookup(v uint64, cmp func(uint64) bool) (*entryUint64, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+// lookup probes for hash v under the capacity mask szMask, returning the index
+// of the matching slot (true) or of the first empty slot found (false).
+func (h *Uint64HashTable) lookup(v uint64, szMask uint64, cmp func(uint64) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entryUint64
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+ // participate in the probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+// upsize rehashes every valid entry into a freshly allocated table of capacity
+// newcap (expected to be a power of two, since newcap-1 is used as the probe mask).
+func (h *Uint64HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entryUint64, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ // cmp always false: we only want the first empty slot for this hash
+ idx, _ := h.lookup(e.h, newMask, func(uint64) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Uint64HashTable) Insert(e *entryUint64, v uint64, val uint64, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Uint64HashTable) VisitEntries(visit func(*entryUint64)) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// Uint64MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Uint64MemoTable struct {
+ tbl *Uint64HashTable
+ nullIdx int32
+}
+
+// NewUint64MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewUint64MemoTable(num int64) *Uint64MemoTable {
+ return &Uint64MemoTable{tbl: NewUint64HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+// TypeTraits returns the type traits from the arrow package for uint64.
+func (Uint64MemoTable) TypeTraits() TypeTraits {
+ return arrow.Uint64Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Uint64MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Uint64MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Uint64MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Uint64MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ // null gets the next memo index; it is tracked separately from the hash table
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *Uint64MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *Uint64MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]uint64))
+}
+
+// WriteOut reinterprets out as []uint64 and copies the values into it in
+// insertion order (native byte order).
+func (s *Uint64MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.Uint64Traits.CastFromBytes(out))
+}
+
+// WriteOutSubset is like WriteOut but only writes the values at memo index
+// start and above.
+func (s *Uint64MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.Uint64Traits.CastFromBytes(out))
+}
+
+// WriteOutLE writes the values to out in insertion order as little-endian bytes.
+func (s *Uint64MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+// WriteOutSubsetLE is like WriteOutLE but only writes the values at memo
+// index start and above.
+func (s *Uint64MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *Uint64MemoTable) Get(val interface{}) (int, bool) {
+
+ h := hashInt(uint64(val.(uint64)), 0)
+ if e, ok := s.tbl.Lookup(h, func(v uint64) bool { return val.(uint64) == v }); ok {
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *Uint64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+ h := hashInt(uint64(val.(uint64)), 0)
+ e, ok := s.tbl.Lookup(h, func(v uint64) bool {
+ return val.(uint64) == v
+ })
+
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ // e points at the empty slot returned by Lookup; insert val there
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.(uint64), int32(idx))
+ }
+ return
+}
+
+// GetOrInsertBytes is unimplemented
+func (s *Uint64MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
+
+// payloadFloat32 pairs an inserted value with the memo index it was assigned.
+type payloadFloat32 struct {
+ val float32
+ memoIdx int32
+}
+
+// entryFloat32 is a single hash table slot: the (fixed-up) hash and its payload.
+type entryFloat32 struct {
+ h uint64
+ payload payloadFloat32
+}
+
+// Valid reports whether this slot holds an inserted value, i.e. its hash is
+// not the empty-slot sentinel.
+func (e entryFloat32) Valid() bool { return e.h != sentinel }
+
+// Float32HashTable is a hashtable specifically for float32 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Float32HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entryFloat32
+}
+
+// NewFloat32HashTable returns a new hash table for float32 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewFloat32HashTable(cap uint64) *Float32HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &Float32HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entryFloat32, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Float32HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entryFloat32, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Float32HashTable) CopyValues(out []float32) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Float32HashTable) CopyValuesSubset(start int, out []float32) {
+ h.VisitEntries(func(e *entryFloat32) {
+ // entries inserted before start come out negative and are skipped
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+// WriteOut writes the values to out, in insertion order, as little-endian bytes.
+func (h *Float32HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+// WriteOutSubset writes the values at memo index start and above to out,
+// in insertion order, as little-endian bytes.
+func (h *Float32HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.Float32Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entryFloat32) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ data[idx] = utils.ToLEFloat32(e.payload.val)
+ }
+ })
+}
+
+// needUpsize reports whether the load factor has been exceeded and the table must grow.
+func (h *Float32HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+// fixHash remaps the sentinel value (which marks empty slots) to an arbitrary
+// non-sentinel hash so that a real entry can never look empty.
+func (Float32HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Float32HashTable) Lookup(v uint64, cmp func(float32) bool) (*entryFloat32, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+// lookup probes for hash v under the capacity mask szMask, returning the index
+// of the matching slot (true) or of the first empty slot found (false).
+func (h *Float32HashTable) lookup(v uint64, szMask uint64, cmp func(float32) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entryFloat32
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+ // participate in the probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+// upsize rehashes every valid entry into a freshly allocated table of capacity
+// newcap (expected to be a power of two, since newcap-1 is used as the probe mask).
+func (h *Float32HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entryFloat32, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ // cmp always false: we only want the first empty slot for this hash
+ idx, _ := h.lookup(e.h, newMask, func(float32) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Float32HashTable) Insert(e *entryFloat32, v uint64, val float32, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Float32HashTable) VisitEntries(visit func(*entryFloat32)) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// Float32MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Float32MemoTable struct {
+ tbl *Float32HashTable
+ nullIdx int32
+}
+
+// NewFloat32MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewFloat32MemoTable(num int64) *Float32MemoTable {
+ return &Float32MemoTable{tbl: NewFloat32HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+// TypeTraits returns the type traits from the arrow package for float32.
+func (Float32MemoTable) TypeTraits() TypeTraits {
+ return arrow.Float32Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Float32MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Float32MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Float32MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Float32MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ // null gets the next memo index; it is tracked separately from the hash table
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *Float32MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *Float32MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]float32))
+}
+
+// WriteOut reinterprets out as []float32 and copies the values into it in
+// insertion order (native byte order).
+func (s *Float32MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.Float32Traits.CastFromBytes(out))
+}
+
+// WriteOutSubset is like WriteOut but only writes the values at memo index
+// start and above.
+func (s *Float32MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.Float32Traits.CastFromBytes(out))
+}
+
+// WriteOutLE writes the values to out in insertion order as little-endian bytes.
+func (s *Float32MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+// WriteOutSubsetLE is like WriteOutLE but only writes the values at memo
+// index start and above.
+func (s *Float32MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *Float32MemoTable) Get(val interface{}) (int, bool) {
+ var cmp func(float32) bool
+
+ if math.IsNaN(float64(val.(float32))) {
+ cmp = isNan32Cmp
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = float32(math.NaN())
+ } else {
+ cmp = func(v float32) bool { return val.(float32) == v }
+ }
+
+ h := hashFloat32(val.(float32), 0)
+ if e, ok := s.tbl.Lookup(h, cmp); ok {
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *Float32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+ var cmp func(float32) bool
+
+ if math.IsNaN(float64(val.(float32))) {
+ cmp = isNan32Cmp
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = float32(math.NaN())
+ } else {
+ cmp = func(v float32) bool { return val.(float32) == v }
+ }
+
+ h := hashFloat32(val.(float32), 0)
+ e, ok := s.tbl.Lookup(h, cmp)
+
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.(float32), int32(idx))
+ }
+ return
+}
+
+// GetOrInsertBytes is unimplemented
+func (s *Float32MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
+
+type payloadFloat64 struct {
+ val float64
+ memoIdx int32
+}
+
+type entryFloat64 struct {
+ h uint64
+ payload payloadFloat64
+}
+
+func (e entryFloat64) Valid() bool { return e.h != sentinel }
+
+// Float64HashTable is a hashtable specifically for float64 that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type Float64HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entryFloat64
+}
+
+// NewFloat64HashTable returns a new hash table for float64 values
+// initialized with the passed in capacity or 32 whichever is larger.
+func NewFloat64HashTable(cap uint64) *Float64HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &Float64HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entryFloat64, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *Float64HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entryFloat64, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *Float64HashTable) CopyValues(out []float64) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *Float64HashTable) CopyValuesSubset(start int, out []float64) {
+ h.VisitEntries(func(e *entryFloat64) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+func (h *Float64HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+func (h *Float64HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.Float64Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entryFloat64) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ data[idx] = utils.ToLEFloat64(e.payload.val)
+ }
+ })
+}
+
+func (h *Float64HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func (Float64HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *Float64HashTable) Lookup(v uint64, cmp func(float64) bool) (*entryFloat64, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+func (h *Float64HashTable) lookup(v uint64, szMask uint64, cmp func(float64) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entryFloat64
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+	// participate in the probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+func (h *Float64HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entryFloat64, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ idx, _ := h.lookup(e.h, newMask, func(float64) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *Float64HashTable) Insert(e *entryFloat64, v uint64, val float64, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *Float64HashTable) VisitEntries(visit func(*entryFloat64)) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// Float64MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type Float64MemoTable struct {
+ tbl *Float64HashTable
+ nullIdx int32
+}
+
+// NewFloat64MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func NewFloat64MemoTable(num int64) *Float64MemoTable {
+ return &Float64MemoTable{tbl: NewFloat64HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func (Float64MemoTable) TypeTraits() TypeTraits {
+ return arrow.Float64Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *Float64MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *Float64MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *Float64MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *Float64MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *Float64MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *Float64MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]float64))
+}
+
+func (s *Float64MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.Float64Traits.CastFromBytes(out))
+}
+
+func (s *Float64MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.Float64Traits.CastFromBytes(out))
+}
+
+func (s *Float64MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+func (s *Float64MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *Float64MemoTable) Get(val interface{}) (int, bool) {
+ var cmp func(float64) bool
+ if math.IsNaN(val.(float64)) {
+ cmp = math.IsNaN
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = math.NaN()
+ } else {
+ cmp = func(v float64) bool { return val.(float64) == v }
+ }
+
+ h := hashFloat64(val.(float64), 0)
+ if e, ok := s.tbl.Lookup(h, cmp); ok {
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *Float64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+
+ var cmp func(float64) bool
+ if math.IsNaN(val.(float64)) {
+ cmp = math.IsNaN
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = math.NaN()
+ } else {
+ cmp = func(v float64) bool { return val.(float64) == v }
+ }
+
+ h := hashFloat64(val.(float64), 0)
+ e, ok := s.tbl.Lookup(h, cmp)
+
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.(float64), int32(idx))
+ }
+ return
+}
+
+// GetOrInsertBytes is unimplemented
+func (s *Float64MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl
new file mode 100644
index 000000000..25164341d
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl
@@ -0,0 +1,349 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hashing
+
+import (
+ "github.com/apache/arrow/go/v14/arrow/bitutil"
+ "github.com/apache/arrow/go/v14/internal/utils"
+)
+
+{{range .In}}
+type payload{{.Name}} struct {
+ val {{.name}}
+ memoIdx int32
+}
+
+type entry{{.Name}} struct {
+ h uint64
+ payload payload{{.Name}}
+}
+
+func (e entry{{.Name}}) Valid() bool { return e.h != sentinel }
+
+// {{.Name}}HashTable is a hashtable specifically for {{.name}} that
+// is utilized with the MemoTable to generalize interactions for easier
+// implementation of dictionaries without losing performance.
+type {{.Name}}HashTable struct {
+ cap uint64
+ capMask uint64
+ size uint64
+
+ entries []entry{{.Name}}
+}
+
+// New{{.Name}}HashTable returns a new hash table for {{.name}} values
+// initialized with the passed in capacity or 32 whichever is larger.
+func New{{.Name}}HashTable(cap uint64) *{{.Name}}HashTable {
+ initCap := uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ ret := &{{.Name}}HashTable{cap: initCap, capMask: initCap - 1, size: 0}
+ ret.entries = make([]entry{{.Name}}, initCap)
+ return ret
+}
+
+// Reset drops all of the values in this hash table and re-initializes it
+// with the specified initial capacity as if by calling New, but without having
+// to reallocate the object.
+func (h *{{.Name}}HashTable) Reset(cap uint64) {
+ h.cap = uint64(bitutil.NextPowerOf2(int(max(cap, 32))))
+ h.capMask = h.cap - 1
+ h.size = 0
+ h.entries = make([]entry{{.Name}}, h.cap)
+}
+
+// CopyValues is used for copying the values out of the hash table into the
+// passed in slice, in the order that they were first inserted
+func (h *{{.Name}}HashTable) CopyValues(out []{{.name}}) {
+ h.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies a subset of the values in the hashtable out, starting
+// with the value at start, in the order that they were inserted.
+func (h *{{.Name}}HashTable) CopyValuesSubset(start int, out []{{.name}}) {
+ h.VisitEntries(func(e *entry{{.Name}}) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+ out[idx] = e.payload.val
+ }
+ })
+}
+
+func (h *{{.Name}}HashTable) WriteOut(out []byte) {
+ h.WriteOutSubset(0, out)
+}
+
+func (h *{{.Name}}HashTable) WriteOutSubset(start int, out []byte) {
+ data := arrow.{{.Name}}Traits.CastFromBytes(out)
+ h.VisitEntries(func(e *entry{{.Name}}) {
+ idx := e.payload.memoIdx - int32(start)
+ if idx >= 0 {
+{{if and (ne .Name "Int8") (ne .Name "Uint8") -}}
+ data[idx] = utils.ToLE{{.Name}}(e.payload.val)
+{{else -}}
+ data[idx] = e.payload.val
+{{end -}}
+ }
+ })
+}
+
+func (h *{{.Name}}HashTable) needUpsize() bool { return h.size*uint64(loadFactor) >= h.cap }
+
+func ({{.Name}}HashTable) fixHash(v uint64) uint64 {
+ if v == sentinel {
+ return 42
+ }
+ return v
+}
+
+// Lookup retrieves the entry for a given hash value assuming its payload value returns
+// true when passed to the cmp func. Returns a pointer to the entry for the given hash value,
+// and a boolean as to whether it was found. It is not safe to use the pointer if the bool is false.
+func (h *{{.Name}}HashTable) Lookup(v uint64, cmp func({{.name}}) bool) (*entry{{.Name}}, bool) {
+ idx, ok := h.lookup(v, h.capMask, cmp)
+ return &h.entries[idx], ok
+}
+
+func (h *{{.Name}}HashTable) lookup(v uint64, szMask uint64, cmp func({{.name}}) bool) (uint64, bool) {
+ const perturbShift uint8 = 5
+
+ var (
+ idx uint64
+ perturb uint64
+ e *entry{{.Name}}
+ )
+
+ v = h.fixHash(v)
+ idx = v & szMask
+ perturb = (v >> uint64(perturbShift)) + 1
+
+ for {
+ e = &h.entries[idx]
+ if e.h == v && cmp(e.payload.val) {
+ return idx, true
+ }
+
+ if e.h == sentinel {
+ return idx, false
+ }
+
+ // perturbation logic inspired from CPython's set/dict object
+ // the goal is that all 64 bits of unmasked hash value eventually
+		// participate in the probing sequence, to minimize clustering
+ idx = (idx + perturb) & szMask
+ perturb = (perturb >> uint64(perturbShift)) + 1
+ }
+}
+
+func (h *{{.Name}}HashTable) upsize(newcap uint64) error {
+ newMask := newcap - 1
+
+ oldEntries := h.entries
+ h.entries = make([]entry{{.Name}}, newcap)
+ for _, e := range oldEntries {
+ if e.Valid() {
+ idx, _ := h.lookup(e.h, newMask, func({{.name}}) bool { return false })
+ h.entries[idx] = e
+ }
+ }
+ h.cap = newcap
+ h.capMask = newMask
+ return nil
+}
+
+// Insert updates the given entry with the provided hash value, payload value and memo index.
+// The entry pointer must have been retrieved via lookup in order to actually insert properly.
+func (h *{{.Name}}HashTable) Insert(e *entry{{.Name}}, v uint64, val {{.name}}, memoIdx int32) error {
+ e.h = h.fixHash(v)
+ e.payload.val = val
+ e.payload.memoIdx = memoIdx
+ h.size++
+
+ if h.needUpsize() {
+ h.upsize(h.cap * uint64(loadFactor) * 2)
+ }
+ return nil
+}
+
+// VisitEntries will call the passed in function on each *valid* entry in the hash table,
+// a valid entry being one which has had a value inserted into it.
+func (h *{{.Name}}HashTable) VisitEntries(visit func(*entry{{.Name}})) {
+ for _, e := range h.entries {
+ if e.Valid() {
+ visit(&e)
+ }
+ }
+}
+
+// {{.Name}}MemoTable is a wrapper over the appropriate hashtable to provide an interface
+// conforming to the MemoTable interface defined in the encoding package for general interactions
+// regarding dictionaries.
+type {{.Name}}MemoTable struct {
+ tbl *{{.Name}}HashTable
+ nullIdx int32
+}
+
+// New{{.Name}}MemoTable returns a new memotable with num entries pre-allocated to reduce further
+// allocations when inserting.
+func New{{.Name}}MemoTable(num int64) *{{.Name}}MemoTable {
+ return &{{.Name}}MemoTable{tbl: New{{.Name}}HashTable(uint64(num)), nullIdx: KeyNotFound}
+}
+
+func ({{.Name}}MemoTable) TypeTraits() TypeTraits {
+ return arrow.{{.Name}}Traits
+}
+
+// Reset allows this table to be re-used by dumping all the data currently in the table.
+func (s *{{.Name}}MemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.nullIdx = KeyNotFound
+}
+
+// Size returns the current number of inserted elements into the table including if a null
+// has been inserted.
+func (s *{{.Name}}MemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// GetNull returns the index of an inserted null or KeyNotFound along with a bool
+// that will be true if found and false if not.
+func (s *{{.Name}}MemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// GetOrInsertNull will return the index of the null entry or insert a null entry
+// if one currently doesn't exist. The found value will be true if there was already
+// a null in the table, and false if it inserted one.
+func (s *{{.Name}}MemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = s.GetNull()
+ if !found {
+ idx = s.Size()
+ s.nullIdx = int32(idx)
+ }
+ return
+}
+
+// CopyValues will copy the values from the memo table out into the passed in slice
+// which must be of the appropriate type.
+func (s *{{.Name}}MemoTable) CopyValues(out interface{}) {
+ s.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset is like CopyValues but only copies a subset of values starting
+// at the provided start index
+func (s *{{.Name}}MemoTable) CopyValuesSubset(start int, out interface{}) {
+ s.tbl.CopyValuesSubset(start, out.([]{{.name}}))
+}
+
+func (s *{{.Name}}MemoTable) WriteOut(out []byte) {
+ s.tbl.CopyValues(arrow.{{.Name}}Traits.CastFromBytes(out))
+}
+
+func (s *{{.Name}}MemoTable) WriteOutSubset(start int, out []byte) {
+ s.tbl.CopyValuesSubset(start, arrow.{{.Name}}Traits.CastFromBytes(out))
+}
+
+func (s *{{.Name}}MemoTable) WriteOutLE(out []byte) {
+ s.tbl.WriteOut(out)
+}
+
+func (s *{{.Name}}MemoTable) WriteOutSubsetLE(start int, out []byte) {
+ s.tbl.WriteOutSubset(start, out)
+}
+
+// Get returns the index of the requested value in the hash table or KeyNotFound
+// along with a boolean indicating if it was found or not.
+func (s *{{.Name}}MemoTable) Get(val interface{}) (int, bool) {
+{{if and (ne .Name "Float32") (ne .Name "Float64") }}
+ h := hashInt(uint64(val.({{.name}})), 0)
+ if e, ok := s.tbl.Lookup(h, func(v {{.name}}) bool { return val.({{.name}}) == v }); ok {
+{{ else -}}
+ var cmp func({{.name}}) bool
+ {{if eq .Name "Float32"}}
+ if math.IsNaN(float64(val.(float32))) {
+ cmp = isNan32Cmp
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = float32(math.NaN())
+ {{ else -}}
+ if math.IsNaN(val.(float64)) {
+ cmp = math.IsNaN
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = math.NaN()
+ {{end -}}
+ } else {
+ cmp = func(v {{.name}}) bool { return val.({{.name}}) == v }
+ }
+
+ h := hash{{.Name}}(val.({{.name}}), 0)
+ if e, ok := s.tbl.Lookup(h, cmp); ok {
+{{ end -}}
+ return int(e.payload.memoIdx), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsert will return the index of the specified value in the table, or insert the
+// value into the table and return the new index. found indicates whether or not it already
+// existed in the table (true) or was inserted by this call (false).
+func (s *{{.Name}}MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ {{if and (ne .Name "Float32") (ne .Name "Float64") }}
+ h := hashInt(uint64(val.({{.name}})), 0)
+ e, ok := s.tbl.Lookup(h, func(v {{.name}}) bool {
+ return val.({{.name}}) == v
+ })
+{{ else }}
+ var cmp func({{.name}}) bool
+ {{if eq .Name "Float32"}}
+ if math.IsNaN(float64(val.(float32))) {
+ cmp = isNan32Cmp
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = float32(math.NaN())
+ {{ else -}}
+ if math.IsNaN(val.(float64)) {
+ cmp = math.IsNaN
+ // use consistent internal bit pattern for NaN regardless of the pattern
+ // that is passed to us. NaN is NaN is NaN
+ val = math.NaN()
+ {{end -}}
+ } else {
+ cmp = func(v {{.name}}) bool { return val.({{.name}}) == v }
+ }
+
+ h := hash{{.Name}}(val.({{.name}}), 0)
+ e, ok := s.tbl.Lookup(h, cmp)
+{{ end }}
+ if ok {
+ idx = int(e.payload.memoIdx)
+ found = true
+ } else {
+ idx = s.Size()
+ s.tbl.Insert(e, h, val.({{.name}}), int32(idx))
+ }
+ return
+}
+
+
+// GetOrInsertBytes is unimplemented
+func (s *{{.Name}}MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ panic("unimplemented")
+}
+{{end}}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go
new file mode 100644
index 000000000..81994f0a8
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go
@@ -0,0 +1,443 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package hashing provides utilities for and an implementation of a hash
+// table which is more performant than the default go map implementation
+// by leveraging xxh3 and some custom hash functions.
+package hashing
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+ "unsafe"
+)
+
+//go:generate go run ../../arrow/_tools/tmpl/main.go -i -data=types.tmpldata xxh3_memo_table.gen.go.tmpl
+
+type TypeTraits interface {
+ BytesRequired(n int) int
+}
+
+type ByteSlice interface {
+ Bytes() []byte
+}
+
+// MemoTable interface for hash tables and dictionary encoding.
+//
+// Values will remember the order they are inserted to generate a valid
+// dictionary.
+type MemoTable interface {
+ TypeTraits() TypeTraits
+ // Reset drops everything in the table allowing it to be reused
+ Reset()
+ // Size returns the current number of unique values stored in
+ // the table, including whether or not a null value has been
+ // inserted via GetOrInsertNull.
+ Size() int
+ // GetOrInsert returns the index of the table the specified value is,
+ // and a boolean indicating whether or not the value was found in
+ // the table (if false, the value was inserted). An error is returned
+ // if val is not the appropriate type for the table.
+ GetOrInsert(val interface{}) (idx int, existed bool, err error)
+ // GetOrInsertBytes returns the index of the table the specified value is,
+ // and a boolean indicating whether or not the value was found in
+ // the table (if false, the value was inserted). An error is returned
+ // if val is not the appropriate type for the table. This function is intended to be used by
+	// the BinaryMemoTable to prevent unnecessary allocations of the data when converting from a []byte to interface{}.
+ GetOrInsertBytes(val []byte) (idx int, existed bool, err error)
+ // GetOrInsertNull returns the index of the null value in the table,
+ // inserting one if it hasn't already been inserted. It returns a boolean
+ // indicating if the null value already existed or not in the table.
+ GetOrInsertNull() (idx int, existed bool)
+ // GetNull returns the index of the null value in the table, but does not
+ // insert one if it doesn't already exist. Will return -1 if it doesn't exist
+ // indicated by a false value for the boolean.
+ GetNull() (idx int, exists bool)
+	// WriteOut copies the unique values of the memotable out to the byte slice
+ // provided. Must have allocated enough bytes for all the values.
+ WriteOut(out []byte)
+ // WriteOutSubset is like WriteOut, but only writes a subset of values
+ // starting with the index offset.
+ WriteOutSubset(offset int, out []byte)
+}
+
+type NumericMemoTable interface {
+ MemoTable
+ WriteOutLE(out []byte)
+ WriteOutSubsetLE(offset int, out []byte)
+}
+
+const (
+ sentinel uint64 = 0
+ loadFactor int64 = 2
+)
+
+func max(a, b uint64) uint64 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+var isNan32Cmp = func(v float32) bool { return math.IsNaN(float64(v)) }
+
+// KeyNotFound is the constant returned by memo table functions when a key isn't found in the table
+const KeyNotFound = -1
+
+type BinaryBuilderIFace interface {
+ Reserve(int)
+ ReserveData(int)
+ Retain()
+ Resize(int)
+ ResizeData(int)
+ Release()
+ DataLen() int
+ Value(int) []byte
+ Len() int
+ AppendNull()
+ AppendString(string)
+ Append([]byte)
+}
+
+// BinaryMemoTable is our hashtable for binary data using the BinaryBuilder
+// to construct the actual data in an easy to pass around way with minimal copies
+// while using a hash table to keep track of the indexes into the dictionary that
+// is created as we go.
+type BinaryMemoTable struct {
+ tbl *Int32HashTable
+ builder BinaryBuilderIFace
+ nullIdx int
+}
+
+// NewBinaryMemoTable returns a hash table for Binary data, the passed in allocator will
+// be utilized for the BinaryBuilder, if nil then memory.DefaultAllocator will be used.
+// initial and valuesize can be used to pre-allocate the table to reduce allocations. With
+// initial being the initial number of entries to allocate for and valuesize being the starting
+// amount of space allocated for writing the actual binary data.
+func NewBinaryMemoTable(initial, valuesize int, bldr BinaryBuilderIFace) *BinaryMemoTable {
+ bldr.Reserve(int(initial))
+ datasize := valuesize
+ if datasize <= 0 {
+ datasize = initial * 4
+ }
+ bldr.ReserveData(datasize)
+ return &BinaryMemoTable{tbl: NewInt32HashTable(uint64(initial)), builder: bldr, nullIdx: KeyNotFound}
+}
+
+type unimplementedtraits struct{}
+
+func (unimplementedtraits) BytesRequired(int) int { panic("unimplemented") }
+
+func (BinaryMemoTable) TypeTraits() TypeTraits {
+ return unimplementedtraits{}
+}
+
+// Reset dumps all of the data in the table allowing it to be reutilized.
+func (s *BinaryMemoTable) Reset() {
+ s.tbl.Reset(32)
+ s.builder.Resize(0)
+ s.builder.ResizeData(0)
+ s.builder.Reserve(int(32))
+ s.builder.ReserveData(int(32) * 4)
+ s.nullIdx = KeyNotFound
+}
+
+// GetNull returns the index of a null that has been inserted into the table or
+// KeyNotFound. The bool returned will be true if there was a null inserted into
+// the table, and false otherwise.
+func (s *BinaryMemoTable) GetNull() (int, bool) {
+ return int(s.nullIdx), s.nullIdx != KeyNotFound
+}
+
+// Size returns the current size of the memo table including the null value
+// if one has been inserted.
+func (s *BinaryMemoTable) Size() int {
+ sz := int(s.tbl.size)
+ if _, ok := s.GetNull(); ok {
+ sz++
+ }
+ return sz
+}
+
+// helper function to easily return a byte slice for any given value
+// regardless of the type if it's a []byte, string, or fulfills the
+// ByteSlice interface.
+func (BinaryMemoTable) valAsByteSlice(val interface{}) []byte {
+ switch v := val.(type) {
+ case []byte:
+ return v
+ case ByteSlice:
+ return v.Bytes()
+ case string:
+ var out []byte
+ h := (*reflect.StringHeader)(unsafe.Pointer(&v))
+ s := (*reflect.SliceHeader)(unsafe.Pointer(&out))
+ s.Data = h.Data
+ s.Len = h.Len
+ s.Cap = h.Len
+ return out
+ default:
+ panic("invalid type for binarymemotable")
+ }
+}
+
+// helper function to get the hash value regardless of the underlying binary type
+func (BinaryMemoTable) getHash(val interface{}) uint64 {
+ switch v := val.(type) {
+ case string:
+ return hashString(v, 0)
+ case []byte:
+ return Hash(v, 0)
+ case ByteSlice:
+ return Hash(v.Bytes(), 0)
+ default:
+ panic("invalid type for binarymemotable")
+ }
+}
+
+// helper function to append the given value to the builder regardless
+// of the underlying binary type.
+func (b *BinaryMemoTable) appendVal(val interface{}) {
+ switch v := val.(type) {
+ case string:
+ b.builder.AppendString(v)
+ case []byte:
+ b.builder.Append(v)
+ case ByteSlice:
+ b.builder.Append(v.Bytes())
+ }
+}
+
+func (b *BinaryMemoTable) lookup(h uint64, val []byte) (*entryInt32, bool) {
+ return b.tbl.Lookup(h, func(i int32) bool {
+ return bytes.Equal(val, b.builder.Value(int(i)))
+ })
+}
+
+// Get returns the index of the specified value in the table or KeyNotFound,
+// and a boolean indicating whether it was found in the table.
+func (b *BinaryMemoTable) Get(val interface{}) (int, bool) {
+ if p, ok := b.lookup(b.getHash(val), b.valAsByteSlice(val)); ok {
+ return int(p.payload.val), ok
+ }
+ return KeyNotFound, false
+}
+
+// GetOrInsertBytes returns the index of the given value in the table, if not found
+// it is inserted into the table. The return value 'found' indicates whether the value
+// was found in the table (true) or inserted (false) along with any possible error.
+func (b *BinaryMemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) {
+ h := Hash(val, 0)
+ p, found := b.lookup(h, val)
+ if found {
+ idx = int(p.payload.val)
+ } else {
+ idx = b.Size()
+ b.builder.Append(val)
+ b.tbl.Insert(p, h, int32(idx), -1)
+ }
+ return
+}
+
+// GetOrInsert returns the index of the given value in the table, if not found
+// it is inserted into the table. The return value 'found' indicates whether the value
+// was found in the table (true) or inserted (false) along with any possible error.
+func (b *BinaryMemoTable) GetOrInsert(val interface{}) (idx int, found bool, err error) {
+ h := b.getHash(val)
+ p, found := b.lookup(h, b.valAsByteSlice(val))
+ if found {
+ idx = int(p.payload.val)
+ } else {
+ idx = b.Size()
+ b.appendVal(val)
+ b.tbl.Insert(p, h, int32(idx), -1)
+ }
+ return
+}
+
+// GetOrInsertNull retrieves the index of a null in the table or inserts
+// null into the table, returning the index and a boolean indicating if it was
+// found in the table (true) or was inserted (false).
+func (b *BinaryMemoTable) GetOrInsertNull() (idx int, found bool) {
+ idx, found = b.GetNull()
+ if !found {
+ idx = b.Size()
+ b.nullIdx = idx
+ b.builder.AppendNull()
+ }
+ return
+}
+
+func (b *BinaryMemoTable) Value(i int) []byte {
+ return b.builder.Value(i)
+}
+
+// helper function to get the offset into the builder data for a given
+// index value.
+func (b *BinaryMemoTable) findOffset(idx int) uintptr {
+ if b.builder.DataLen() == 0 {
+ // only empty strings, short circuit
+ return 0
+ }
+
+ val := b.builder.Value(idx)
+ for len(val) == 0 {
+ idx++
+ if idx >= b.builder.Len() {
+ break
+ }
+ val = b.builder.Value(idx)
+ }
+ if len(val) != 0 {
+ return uintptr(unsafe.Pointer(&val[0]))
+ }
+ return uintptr(b.builder.DataLen()) + b.findOffset(0)
+}
+
+// CopyOffsets copies the list of offsets into the passed in slice, the offsets
+// being the start and end values of the underlying allocated bytes in the builder
+// for the individual values of the table. out should be at least sized to Size()+1
+func (b *BinaryMemoTable) CopyOffsets(out []int32) {
+ b.CopyOffsetsSubset(0, out)
+}
+
+// CopyOffsetsSubset is like CopyOffsets but instead of copying all of the offsets,
+// it gets a subset of the offsets in the table starting at the index provided by "start".
+func (b *BinaryMemoTable) CopyOffsetsSubset(start int, out []int32) {
+ if b.builder.Len() <= start {
+ return
+ }
+
+ first := b.findOffset(0)
+ delta := b.findOffset(start)
+ sz := b.Size()
+ for i := start; i < sz; i++ {
+ offset := int32(b.findOffset(i) - delta)
+ out[i-start] = offset
+ }
+
+ out[sz-start] = int32(b.builder.DataLen() - (int(delta) - int(first)))
+}
+
+// CopyLargeOffsets copies the list of offsets into the passed in slice, the offsets
+// being the start and end values of the underlying allocated bytes in the builder
+// for the individual values of the table. out should be at least sized to Size()+1
+func (b *BinaryMemoTable) CopyLargeOffsets(out []int64) {
+ b.CopyLargeOffsetsSubset(0, out)
+}
+
+// CopyLargeOffsetsSubset is like CopyLargeOffsets but instead of copying all of the offsets,
+// it gets a subset of the offsets in the table starting at the index provided by "start".
+func (b *BinaryMemoTable) CopyLargeOffsetsSubset(start int, out []int64) {
+ if b.builder.Len() <= start {
+ return
+ }
+
+ first := b.findOffset(0)
+ delta := b.findOffset(start)
+ sz := b.Size()
+ for i := start; i < sz; i++ {
+ offset := int64(b.findOffset(i) - delta)
+ out[i-start] = offset
+ }
+
+ out[sz-start] = int64(b.builder.DataLen() - (int(delta) - int(first)))
+}
+
+// CopyValues copies the raw binary data bytes out, out should be a []byte
+// with at least ValuesSize bytes allocated to copy into.
+func (b *BinaryMemoTable) CopyValues(out interface{}) {
+ b.CopyValuesSubset(0, out)
+}
+
+// CopyValuesSubset copies the raw binary data bytes out starting with the value
+// at the index start, out should be a []byte with at least ValuesSize bytes allocated
+func (b *BinaryMemoTable) CopyValuesSubset(start int, out interface{}) {
+ if b.builder.Len() <= start {
+ return
+ }
+
+ var (
+ first = b.findOffset(0)
+ offset = b.findOffset(int(start))
+ length = b.builder.DataLen() - int(offset-first)
+ )
+
+ outval := out.([]byte)
+ copy(outval, b.builder.Value(start)[0:length])
+}
+
+func (b *BinaryMemoTable) WriteOut(out []byte) {
+ b.CopyValues(out)
+}
+
+func (b *BinaryMemoTable) WriteOutSubset(start int, out []byte) {
+ b.CopyValuesSubset(start, out)
+}
+
+// CopyFixedWidthValues exists to cope with the fact that the table doesn't keep
+// track of the fixed width when inserting the null value: the data buffer holds a
+// zero-length byte slice for the null value (if present).
+func (b *BinaryMemoTable) CopyFixedWidthValues(start, width int, out []byte) {
+ if start >= b.Size() {
+ return
+ }
+
+ null, exists := b.GetNull()
+ if !exists || null < start {
+ // nothing to skip, proceed as usual
+ b.CopyValuesSubset(start, out)
+ return
+ }
+
+ var (
+ leftOffset = b.findOffset(start)
+ nullOffset = b.findOffset(null)
+ leftSize = nullOffset - leftOffset
+ rightOffset = leftOffset + uintptr(b.ValuesSize())
+ )
+
+ if leftSize > 0 {
+ copy(out, b.builder.Value(start)[0:leftSize])
+ }
+
+ rightSize := rightOffset - nullOffset
+ if rightSize > 0 {
+ // skip the null fixed size value
+ copy(out[int(leftSize)+width:], b.builder.Value(null + 1)[0:rightSize])
+ }
+}
+
+// VisitValues exists to run the visitFn on each value currently in the hash table.
+func (b *BinaryMemoTable) VisitValues(start int, visitFn func([]byte)) {
+ for i := int(start); i < b.Size(); i++ {
+ visitFn(b.builder.Value(i))
+ }
+}
+
+// Release is used to tell the underlying builder that it can release the memory allocated
+// when the reference count reaches 0, this is safe to be called from multiple goroutines
+// simultaneously
+func (b *BinaryMemoTable) Release() { b.builder.Release() }
+
+// Retain increases the ref count, it is safe to call it from multiple goroutines
+// simultaneously.
+func (b *BinaryMemoTable) Retain() { b.builder.Retain() }
+
+// ValuesSize returns the current total size of all the raw bytes that have been inserted
+// into the memotable so far.
+func (b *BinaryMemoTable) ValuesSize() int { return b.builder.DataLen() }
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/json/json.go b/vendor/github.com/apache/arrow/go/v14/internal/json/json.go
new file mode 100644
index 000000000..319b12c55
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/json/json.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !tinygo
+// +build !tinygo
+
+package json
+
+import (
+ "io"
+
+ "github.com/goccy/go-json"
+)
+
+type Decoder = json.Decoder
+type Encoder = json.Encoder
+type Marshaler = json.Marshaler
+type Delim = json.Delim
+type UnmarshalTypeError = json.UnmarshalTypeError
+type Number = json.Number
+type Unmarshaler = json.Unmarshaler
+type RawMessage = json.RawMessage
+
+func Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+func Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+func NewDecoder(r io.Reader) *Decoder {
+ return json.NewDecoder(r)
+}
+
+func NewEncoder(w io.Writer) *Encoder {
+ return json.NewEncoder(w)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go b/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go
new file mode 100644
index 000000000..8e4f447b3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build tinygo
+// +build tinygo
+
+package json
+
+import (
+ "io"
+
+ "encoding/json"
+)
+
+type Decoder = json.Decoder
+type Encoder = json.Encoder
+type Marshaler = json.Marshaler
+type Delim = json.Delim
+type UnmarshalTypeError = json.UnmarshalTypeError
+type Number = json.Number
+type Unmarshaler = json.Unmarshaler
+type RawMessage = json.RawMessage
+
+func Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+func Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+func NewDecoder(r io.Reader) *Decoder {
+ return json.NewDecoder(r)
+}
+
+func NewEncoder(w io.Writer) *Encoder {
+ return json.NewEncoder(w)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile b/vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile
new file mode 100644
index 000000000..fded9d1d5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile
@@ -0,0 +1,80 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# this converts rotate instructions from "ro[lr] <reg>" -> "ro[lr] <reg>, 1" for yasm compatibility
+PERL_FIXUP_ROTATE=perl -i -pe 's/(ro[rl]\s+\w{2,3})$$/\1, 1/'
+
+C2GOASM=c2goasm
+CC=clang-11
+C_FLAGS=-target x86_64-unknown-none -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 \
+ -fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -O3 -fno-builtin -ffast-math -fno-jump-tables -I_lib
+ASM_FLAGS_AVX2=-mavx2 -mfma
+ASM_FLAGS_SSE4=-msse4
+ASM_FLAGS_BMI2=-mbmi2
+ASM_FLAGS_POPCNT=-mpopcnt
+
+C_FLAGS_NEON=-O3 -fvectorize -mllvm -force-vector-width=16 -fno-asynchronous-unwind-tables -mno-red-zone -mstackrealign -fno-exceptions \
+ -fno-rtti -fno-builtin -ffast-math -fno-jump-tables -I_lib
+
+GO_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -not -name '*_test.go')
+ALL_SOURCES := $(shell find . -path ./_lib -prune -o -name '*.go' -name '*.s' -not -name '*_test.go')
+
+.PHONY: assembly
+
+INTEL_SOURCES := \
+ min_max_avx2_amd64.s min_max_sse4_amd64.s transpose_ints_avx2_amd64.s transpose_ints_sse4_amd64.s
+
+#
+# ARROW-15336: DO NOT add the assembly target for Arm64 (ARM_SOURCES) until c2goasm added the Arm64 support.
+# min_max_neon_arm64.s was generated by asm2plan9s.
+# And manually formatted it as the Arm64 Plan9.
+#
+
+assembly: $(INTEL_SOURCES)
+
+_lib/min_max_avx2_amd64.s: _lib/min_max.c
+ $(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/min_max_sse4_amd64.s: _lib/min_max.c
+ $(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/min_max_neon.s: _lib/min_max.c
+ $(CC) -S $(C_FLAGS_NEON) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/transpose_ints_avx2_amd64.s: _lib/transpose_ints.c
+ $(CC) -S $(C_FLAGS) $(ASM_FLAGS_AVX2) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/transpose_ints_sse4_amd64.s: _lib/transpose_ints.c
+ $(CC) -S $(C_FLAGS) $(ASM_FLAGS_SSE4) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+_lib/transpose_ints_neon.s: _lib/transpose_ints.c
+ $(CC) -S $(C_FLAGS_NEON) $^ -o $@ ; $(PERL_FIXUP_ROTATE) $@
+
+min_max_avx2_amd64.s: _lib/min_max_avx2_amd64.s
+ $(C2GOASM) -a -f $^ $@
+
+min_max_sse4_amd64.s: _lib/min_max_sse4_amd64.s
+ $(C2GOASM) -a -f $^ $@
+
+transpose_ints_avx2_amd64.s: _lib/transpose_ints_avx2_amd64.s
+ $(C2GOASM) -a -f $^ $@
+
+transpose_ints_sse4_amd64.s: _lib/transpose_ints_sse4_amd64.s
+ $(C2GOASM) -a -f $^ $@
+
+clean:
+ rm -f $(INTEL_SOURCES)
+ rm -f $(addprefix _lib/,$(INTEL_SOURCES))
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go
new file mode 100644
index 000000000..0b2381da1
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go
@@ -0,0 +1,212 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// bufferedReader is similar to bufio.Reader except
+// it will expand the buffer if necessary when asked to Peek
+// more bytes than are in the buffer
+type bufferedReader struct {
+ bufferSz int
+ buf []byte
+ r, w int
+ rd io.Reader
+ err error
+}
+
+// NewBufferedReader returns a buffered reader with similar semantics to bufio.Reader
+// except Peek will expand the internal buffer if needed rather than return
+// an error.
+func NewBufferedReader(rd io.Reader, sz int) *bufferedReader {
+ // if rd is already a buffered reader whose buffer is >= the requested size
+ // then just return it as is. no need to make a new object.
+ b, ok := rd.(*bufferedReader)
+ if ok && len(b.buf) >= sz {
+ return b
+ }
+
+ r := &bufferedReader{
+ rd: rd,
+ }
+ r.resizeBuffer(sz)
+ return r
+}
+
+func (b *bufferedReader) resetBuffer() {
+ if b.buf == nil {
+ b.buf = make([]byte, b.bufferSz)
+ } else if b.bufferSz > cap(b.buf) {
+ buf := b.buf
+ b.buf = make([]byte, b.bufferSz)
+ copy(b.buf, buf)
+ } else {
+ b.buf = b.buf[:b.bufferSz]
+ }
+}
+
+func (b *bufferedReader) resizeBuffer(newSize int) {
+ b.bufferSz = newSize
+ b.resetBuffer()
+}
+
+func (b *bufferedReader) fill() error {
+ // slide existing data to the beginning
+ if b.r > 0 {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ if b.w >= len(b.buf) {
+ return fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrBufferFull)
+ }
+
+ n, err := io.ReadAtLeast(b.rd, b.buf[b.w:], 1)
+ if n < 0 {
+ return fmt.Errorf("arrow/bufferedreader: filling buffer: %w", bufio.ErrNegativeCount)
+ }
+
+ b.w += n
+ b.err = err
+ return nil
+}
+
+func (b *bufferedReader) readErr() error {
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// Buffered returns the number of bytes currently buffered
+func (b *bufferedReader) Buffered() int { return b.w - b.r }
+
+// SetBufferSize resets the size of the internal buffer to the desired size.
+// Will return an error if newSize is <= 0 or if newSize is less than the size
+// of the buffered data.
+func (b *bufferedReader) SetBufferSize(newSize int) error {
+ if newSize <= 0 {
+ return errors.New("buffer size should be positive")
+ }
+
+ if b.w >= newSize {
+ return errors.New("cannot shrink read buffer if buffered data remains")
+ }
+
+ b.resizeBuffer(newSize)
+ return nil
+}
+
+// Peek will buffer and return n bytes from the underlying reader without advancing
+// the reader itself. If n is larger than the current buffer size, the buffer will
+// be expanded to accommodate the extra bytes rather than error.
+func (b *bufferedReader) Peek(n int) ([]byte, error) {
+ if n < 0 {
+ return nil, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount)
+ }
+
+ if n > len(b.buf) {
+ if err := b.SetBufferSize(n); err != nil {
+ return nil, err
+ }
+ }
+
+ for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
+ b.fill() // b.w-b.r < len(b.buf) => buffer is not full
+ }
+
+ return b.buf[b.r : b.r+n], b.readErr()
+}
+
+// Discard skips the next n bytes either by advancing the internal buffer
+// or by reading that many bytes in and throwing them away.
+func (b *bufferedReader) Discard(n int) (discarded int, err error) {
+ if n < 0 {
+ return 0, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount)
+ }
+
+ if n == 0 {
+ return
+ }
+
+ remain := n
+ for {
+ skip := b.Buffered()
+ if skip == 0 {
+ b.fill()
+ skip = b.Buffered()
+ }
+ if skip > remain {
+ skip = remain
+ }
+ b.r += skip
+ remain -= skip
+ if remain == 0 {
+ return n, nil
+ }
+ if b.err != nil {
+ return n - remain, b.readErr()
+ }
+ }
+}
+
+func (b *bufferedReader) Read(p []byte) (n int, err error) {
+ n = len(p)
+ if n == 0 {
+ if b.Buffered() > 0 {
+ return 0, nil
+ }
+ return 0, b.readErr()
+ }
+
+ if b.r == b.w {
+ if b.err != nil {
+ return 0, b.readErr()
+ }
+ if len(p) >= len(b.buf) {
+ // large read, empty buffer
+ // read directly into p to avoid extra copy
+ n, b.err = b.rd.Read(p)
+ if n < 0 {
+ return n, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount)
+ }
+ return n, b.readErr()
+ }
+
+ // one read
+ // don't use b.fill
+ b.r, b.w = 0, 0
+ n, b.err = b.rd.Read(b.buf)
+ if n < 0 {
+ return n, fmt.Errorf("arrow/bufferedreader: %w", bufio.ErrNegativeCount)
+ }
+ if n == 0 {
+ return 0, b.readErr()
+ }
+ b.w += n
+ }
+
+ // copy as much as we can
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ return n, nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go
new file mode 100644
index 000000000..5fd257f52
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !s390x
+
+package utils
+
+var (
+ ToLEInt16 = func(x int16) int16 { return x }
+ ToLEUint16 = func(x uint16) uint16 { return x }
+ ToLEUint32 = func(x uint32) uint32 { return x }
+ ToLEUint64 = func(x uint64) uint64 { return x }
+ ToLEInt32 = func(x int32) int32 { return x }
+ ToLEInt64 = func(x int64) int64 { return x }
+ ToLEFloat32 = func(x float32) float32 { return x }
+ ToLEFloat64 = func(x float64) float64 { return x }
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go
new file mode 100644
index 000000000..7bb27cd81
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go
@@ -0,0 +1,33 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "math"
+ "math/bits"
+)
+
+var (
+ ToLEInt16 = func(x int16) int16 { return int16(bits.ReverseBytes16(uint16(x))) }
+ ToLEUint16 = bits.ReverseBytes16
+ ToLEUint32 = bits.ReverseBytes32
+ ToLEUint64 = bits.ReverseBytes64
+ ToLEInt32 = func(x int32) int32 { return int32(bits.ReverseBytes32(uint32(x))) }
+ ToLEInt64 = func(x int64) int64 { return int64(bits.ReverseBytes64(uint64(x))) }
+ ToLEFloat32 = func(x float32) float32 { return math.Float32frombits(bits.ReverseBytes32(math.Float32bits(x))) }
+ ToLEFloat64 = func(x float64) float64 { return math.Float64frombits(bits.ReverseBytes64(math.Float64bits(x))) }
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/math.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/math.go
new file mode 100644
index 000000000..62cf96ce4
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/math.go
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+// Min is a convenience Min function for int64
+func Min(a, b int64) int64 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// MinInt is a convenience Min function for int
+func MinInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+// Max is a convenience Max function for int64
+func Max(a, b int64) int64 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// MaxInt is a convenience Max function for int
+func MaxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go
new file mode 100644
index 000000000..3d7b0024a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go
@@ -0,0 +1,212 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "math"
+)
+
+// this file contains pure go implementations of the min_max functions that are
+// SIMD accelerated so that we can fallback to these if the cpu doesn't support
+// AVX2 or SSE4 instructions.
+
+func int8MinMax(values []int8) (min, max int8) {
+ min = math.MaxInt8
+ max = math.MinInt8
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func uint8MinMax(values []uint8) (min, max uint8) {
+ min = math.MaxUint8
+ max = 0
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func int16MinMax(values []int16) (min, max int16) {
+ min = math.MaxInt16
+ max = math.MinInt16
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func uint16MinMax(values []uint16) (min, max uint16) {
+ min = math.MaxUint16
+ max = 0
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func int32MinMax(values []int32) (min, max int32) {
+ min = math.MaxInt32
+ max = math.MinInt32
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func uint32MinMax(values []uint32) (min, max uint32) {
+ min = math.MaxUint32
+ max = 0
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func int64MinMax(values []int64) (min, max int64) {
+ min = math.MaxInt64
+ max = math.MinInt64
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+func uint64MinMax(values []uint64) (min, max uint64) {
+ min = math.MaxUint64
+ max = 0
+
+ for _, v := range values {
+ if min > v {
+ min = v
+ }
+ if max < v {
+ max = v
+ }
+ }
+ return
+}
+
+var minmaxFuncs = struct {
+ i8 func([]int8) (int8, int8)
+ ui8 func([]uint8) (uint8, uint8)
+ i16 func([]int16) (int16, int16)
+ ui16 func([]uint16) (uint16, uint16)
+ i32 func([]int32) (int32, int32)
+ ui32 func([]uint32) (uint32, uint32)
+ i64 func([]int64) (int64, int64)
+ ui64 func([]uint64) (uint64, uint64)
+}{}
+
+// GetMinMaxInt8 returns the min and max for a int8 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt8(v []int8) (min, max int8) {
+ return minmaxFuncs.i8(v)
+}
+
+// GetMinMaxUint8 returns the min and max for a uint8 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint8(v []uint8) (min, max uint8) {
+ return minmaxFuncs.ui8(v)
+}
+
+// GetMinMaxInt16 returns the min and max for a int16 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt16(v []int16) (min, max int16) {
+ return minmaxFuncs.i16(v)
+}
+
+// GetMinMaxUint16 returns the min and max for a uint16 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint16(v []uint16) (min, max uint16) {
+ return minmaxFuncs.ui16(v)
+}
+
+// GetMinMaxInt32 returns the min and max for a int32 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt32(v []int32) (min, max int32) {
+ return minmaxFuncs.i32(v)
+}
+
+// GetMinMaxUint32 returns the min and max for a uint32 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint32(v []uint32) (min, max uint32) {
+ return minmaxFuncs.ui32(v)
+}
+
+// GetMinMaxInt64 returns the min and max for a int64 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxInt64(v []int64) (min, max int64) {
+ return minmaxFuncs.i64(v)
+}
+
+// GetMinMaxUint64 returns the min and max for a uint64 slice, using AVX2 or
+// SSE4 cpu extensions if available, falling back to a pure go implementation
+// if they are unavailable or built with the noasm tag.
+func GetMinMaxUint64(v []uint64) (min, max uint64) {
+ return minmaxFuncs.ui64(v)
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go
new file mode 100644
index 000000000..5fccddbee
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go
@@ -0,0 +1,55 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import "golang.org/x/sys/cpu"
+
+func init() {
+ // if the CPU supports AVX2 or SSE4 then let's use those to benefit from SIMD
+ // to accelerate the performance for finding the min and max for an integral slice.
+ // otherwise fallback to a pure go implementation if the cpu doesn't have these features.
+ if cpu.X86.HasAVX2 {
+ minmaxFuncs.i8 = int8MaxMinAVX2
+ minmaxFuncs.ui8 = uint8MaxMinAVX2
+ minmaxFuncs.i16 = int16MaxMinAVX2
+ minmaxFuncs.ui16 = uint16MaxMinAVX2
+ minmaxFuncs.i32 = int32MaxMinAVX2
+ minmaxFuncs.ui32 = uint32MaxMinAVX2
+ minmaxFuncs.i64 = int64MaxMinAVX2
+ minmaxFuncs.ui64 = uint64MaxMinAVX2
+ } else if cpu.X86.HasSSE42 {
+ minmaxFuncs.i8 = int8MaxMinSSE4
+ minmaxFuncs.ui8 = uint8MaxMinSSE4
+ minmaxFuncs.i16 = int16MaxMinSSE4
+ minmaxFuncs.ui16 = uint16MaxMinSSE4
+ minmaxFuncs.i32 = int32MaxMinSSE4
+ minmaxFuncs.ui32 = uint32MaxMinSSE4
+ minmaxFuncs.i64 = int64MaxMinSSE4
+ minmaxFuncs.ui64 = uint64MaxMinSSE4
+ } else {
+ minmaxFuncs.i8 = int8MinMax
+ minmaxFuncs.ui8 = uint8MinMax
+ minmaxFuncs.i16 = int16MinMax
+ minmaxFuncs.ui16 = uint16MinMax
+ minmaxFuncs.i32 = int32MinMax
+ minmaxFuncs.ui32 = uint32MinMax
+ minmaxFuncs.i64 = int64MinMax
+ minmaxFuncs.ui64 = uint64MinMax
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go
new file mode 100644
index 000000000..7404e95d9
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go
@@ -0,0 +1,65 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import (
+ "os"
+ "strings"
+)
+import "golang.org/x/sys/cpu"
+
+func init() {
+ // Added ability to enable extension via environment:
+ // ARM_ENABLE_EXT=NEON go test
+ if ext, ok := os.LookupEnv("ARM_ENABLE_EXT"); ok {
+ exts := strings.Split(ext, ",")
+
+ for _, x := range exts {
+ switch x {
+ case "NEON":
+ cpu.ARM64.HasASIMD = true
+ case "AES":
+ cpu.ARM64.HasAES = true
+ case "PMULL":
+ cpu.ARM64.HasPMULL = true
+ default:
+ cpu.ARM64.HasASIMD = false
+ cpu.ARM64.HasAES = false
+ cpu.ARM64.HasPMULL = false
+ }
+ }
+ }
+ if cpu.ARM64.HasASIMD {
+ minmaxFuncs.i32 = int32MaxMinNEON
+ minmaxFuncs.ui32 = uint32MaxMinNEON
+ minmaxFuncs.i64 = int64MaxMinNEON
+ minmaxFuncs.ui64 = uint64MaxMinNEON
+ } else {
+ minmaxFuncs.i32 = int32MinMax
+ minmaxFuncs.ui32 = uint32MinMax
+ minmaxFuncs.i64 = int64MinMax
+ minmaxFuncs.ui64 = uint64MinMax
+ }
+
+ // haven't yet generated the NEON arm64 for these
+ minmaxFuncs.i8 = int8MinMax
+ minmaxFuncs.ui8 = uint8MinMax
+ minmaxFuncs.i16 = int16MinMax
+ minmaxFuncs.ui16 = uint16MinMax
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go
new file mode 100644
index 000000000..af6726243
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import (
+ "unsafe"
+)
+
+// This file contains convenience functions for utilizing AVX2 intrinsics to quickly
+// and efficiently get the min and max from an integral slice.
+
+//go:noescape
+func _int8_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func int8MaxMinAVX2(values []int8) (min, max int8) {
+ _int8_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _uint8_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func uint8MaxMinAVX2(values []uint8) (min, max uint8) {
+ _uint8_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _int16_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func int16MaxMinAVX2(values []int16) (min, max int16) {
+ _int16_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _uint16_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func uint16MaxMinAVX2(values []uint16) (min, max uint16) {
+ _uint16_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _int32_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func int32MaxMinAVX2(values []int32) (min, max int32) {
+ _int32_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _uint32_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func uint32MaxMinAVX2(values []uint32) (min, max uint32) {
+ _uint32_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _int64_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func int64MaxMinAVX2(values []int64) (min, max int64) {
+ _int64_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
+
+//go:noescape
+func _uint64_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+func uint64MaxMinAVX2(values []uint64) (min, max uint64) {
+ _uint64_max_min_avx2(unsafe.Pointer(&values[0]), len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s
new file mode 100644
index 000000000..fe0c36e0e
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s
@@ -0,0 +1,927 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+DATA LCDATA1<>+0x000(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x008(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x010(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x018(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x020(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x028(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x030(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x038(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x040(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x048(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x050(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x058(SB)/8, $0x8080808080808080
+GLOBL LCDATA1<>(SB), 8, $96
+
+TEXT ·_int8_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA1<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB0_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x3f // cmp esi, 63
+ JA LBB0_4
+ WORD $0xb041; BYTE $0x80 // mov r8b, -128
+ WORD $0xb640; BYTE $0x7f // mov sil, 127
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB0_11
+
+LBB0_1:
+ WORD $0xb640; BYTE $0x7f // mov sil, 127
+ WORD $0xb041; BYTE $0x80 // mov r8b, -128
+ JMP LBB0_12
+
+LBB0_4:
+ WORD $0x8945; BYTE $0xca // mov r10d, r9d
+ LONG $0xc0e28341 // and r10d, -64
+ LONG $0xc0428d49 // lea rax, [r10 - 64]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x06e8c149 // shr r8, 6
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB0_5
+ WORD $0x894c; BYTE $0xc6 // mov rsi, r8
+ LONG $0xfee68348 // and rsi, -2
+ WORD $0xf748; BYTE $0xde // neg rsi
+ LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI0_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0
+ LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1
+
+LBB0_7:
+ LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax]
+ LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32]
+ LONG $0x746ffec5; WORD $0x4007 // vmovdqu ymm6, yword [rdi + rax + 64]
+ LONG $0x7c6ffec5; WORD $0x6007 // vmovdqu ymm7, yword [rdi + rax + 96]
+ LONG $0x387de2c4; BYTE $0xc4 // vpminsb ymm0, ymm0, ymm4
+ LONG $0x386de2c4; BYTE $0xd5 // vpminsb ymm2, ymm2, ymm5
+ LONG $0x3c75e2c4; BYTE $0xcc // vpmaxsb ymm1, ymm1, ymm4
+ LONG $0x3c65e2c4; BYTE $0xdd // vpmaxsb ymm3, ymm3, ymm5
+ LONG $0x387de2c4; BYTE $0xc6 // vpminsb ymm0, ymm0, ymm6
+ LONG $0x386de2c4; BYTE $0xd7 // vpminsb ymm2, ymm2, ymm7
+ LONG $0x3c75e2c4; BYTE $0xce // vpmaxsb ymm1, ymm1, ymm6
+ LONG $0x3c65e2c4; BYTE $0xdf // vpmaxsb ymm3, ymm3, ymm7
+ LONG $0x80e88348 // sub rax, -128
+ LONG $0x02c68348 // add rsi, 2
+ JNE LBB0_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB0_10
+
+LBB0_9:
+ LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax]
+ LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32]
+ LONG $0x3c65e2c4; BYTE $0xdd // vpmaxsb ymm3, ymm3, ymm5
+ LONG $0x3c75e2c4; BYTE $0xcc // vpmaxsb ymm1, ymm1, ymm4
+ LONG $0x386de2c4; BYTE $0xd5 // vpminsb ymm2, ymm2, ymm5
+ LONG $0x387de2c4; BYTE $0xc4 // vpminsb ymm0, ymm0, ymm4
+
+LBB0_10:
+ LONG $0x3c75e2c4; BYTE $0xcb // vpmaxsb ymm1, ymm1, ymm3
+ LONG $0x397de3c4; WORD $0x01cb // vextracti128 xmm3, ymm1, 1
+ LONG $0x3c71e2c4; BYTE $0xcb // vpmaxsb xmm1, xmm1, xmm3
+ LONG $0x4deff1c5; BYTE $0x40 // vpxor xmm1, xmm1, oword 64[rbp] /* [rip + .LCPI0_2] */
+ LONG $0x387de2c4; BYTE $0xc2 // vpminsb ymm0, ymm0, ymm2
+ LONG $0xd171e9c5; BYTE $0x08 // vpsrlw xmm2, xmm1, 8
+ LONG $0xcadaf1c5 // vpminub xmm1, xmm1, xmm2
+ LONG $0x4179e2c4; BYTE $0xc9 // vphminposuw xmm1, xmm1
+ LONG $0x7e79c1c4; BYTE $0xc8 // vmovd r8d, xmm1
+ LONG $0x7ff08041 // xor r8b, 127
+ LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1
+ LONG $0x3879e2c4; BYTE $0xc1 // vpminsb xmm0, xmm0, xmm1
+ LONG $0x45eff9c5; BYTE $0x50 // vpxor xmm0, xmm0, oword 80[rbp] /* [rip + .LCPI0_3] */
+ LONG $0xd071f1c5; BYTE $0x08 // vpsrlw xmm1, xmm0, 8
+ LONG $0xc1daf9c5 // vpminub xmm0, xmm0, xmm1
+ LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0
+ LONG $0xc67ef9c5 // vmovd esi, xmm0
+ LONG $0x80f68040 // xor sil, -128
+ WORD $0x394d; BYTE $0xca // cmp r10, r9
+ JE LBB0_12
+
+LBB0_11:
+ LONG $0x04b60f42; BYTE $0x17 // movzx eax, byte [rdi + r10]
+ WORD $0x3840; BYTE $0xc6 // cmp sil, al
+ LONG $0xf6b60f40 // movzx esi, sil
+ WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax
+ WORD $0x3841; BYTE $0xc0 // cmp r8b, al
+ LONG $0xc0b60f45 // movzx r8d, r8b
+ LONG $0xc04c0f44 // cmovl r8d, eax
+ LONG $0x01c28349 // add r10, 1
+ WORD $0x394d; BYTE $0xd1 // cmp r9, r10
+ JNE LBB0_11
+
+LBB0_12:
+ WORD $0x8844; BYTE $0x01 // mov byte [rcx], r8b
+ WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil
+ VZEROUPPER
+ RET
+
+LBB0_5:
+ LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI0_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0
+ LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB0_9
+ JMP LBB0_10
+
+TEXT ·_uint8_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB1_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x3f // cmp esi, 63
+ JA LBB1_4
+ WORD $0xb640; BYTE $0xff // mov sil, -1
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ WORD $0xc031 // xor eax, eax
+ JMP LBB1_11
+
+LBB1_1:
+ WORD $0xb640; BYTE $0xff // mov sil, -1
+ WORD $0xc031 // xor eax, eax
+ JMP LBB1_12
+
+LBB1_4:
+ WORD $0x8945; BYTE $0xca // mov r10d, r9d
+ LONG $0xc0e28341 // and r10d, -64
+ LONG $0xc0428d49 // lea rax, [r10 - 64]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x06e8c149 // shr r8, 6
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB1_5
+ WORD $0x894c; BYTE $0xc6 // mov rsi, r8
+ LONG $0xfee68348 // and rsi, -2
+ WORD $0xf748; BYTE $0xde // neg rsi
+ LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0
+ LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2
+ LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3
+
+LBB1_7:
+ LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax]
+ LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32]
+ LONG $0x746ffec5; WORD $0x4007 // vmovdqu ymm6, yword [rdi + rax + 64]
+ LONG $0x7c6ffec5; WORD $0x6007 // vmovdqu ymm7, yword [rdi + rax + 96]
+ LONG $0xccdaf5c5 // vpminub ymm1, ymm1, ymm4
+ LONG $0xd5daedc5 // vpminub ymm2, ymm2, ymm5
+ LONG $0xc4defdc5 // vpmaxub ymm0, ymm0, ymm4
+ LONG $0xdddee5c5 // vpmaxub ymm3, ymm3, ymm5
+ LONG $0xcedaf5c5 // vpminub ymm1, ymm1, ymm6
+ LONG $0xd7daedc5 // vpminub ymm2, ymm2, ymm7
+ LONG $0xc6defdc5 // vpmaxub ymm0, ymm0, ymm6
+ LONG $0xdfdee5c5 // vpmaxub ymm3, ymm3, ymm7
+ LONG $0x80e88348 // sub rax, -128
+ LONG $0x02c68348 // add rsi, 2
+ JNE LBB1_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB1_10
+
+LBB1_9:
+ LONG $0x246ffec5; BYTE $0x07 // vmovdqu ymm4, yword [rdi + rax]
+ LONG $0x6c6ffec5; WORD $0x2007 // vmovdqu ymm5, yword [rdi + rax + 32]
+ LONG $0xdddee5c5 // vpmaxub ymm3, ymm3, ymm5
+ LONG $0xc4defdc5 // vpmaxub ymm0, ymm0, ymm4
+ LONG $0xd5daedc5 // vpminub ymm2, ymm2, ymm5
+ LONG $0xccdaf5c5 // vpminub ymm1, ymm1, ymm4
+
+LBB1_10:
+ LONG $0xcadaf5c5 // vpminub ymm1, ymm1, ymm2
+ LONG $0xc3defdc5 // vpmaxub ymm0, ymm0, ymm3
+ LONG $0x397de3c4; WORD $0x01c2 // vextracti128 xmm2, ymm0, 1
+ LONG $0xc2def9c5 // vpmaxub xmm0, xmm0, xmm2
+ LONG $0xd276e9c5 // vpcmpeqd xmm2, xmm2, xmm2
+ LONG $0xc2eff9c5 // vpxor xmm0, xmm0, xmm2
+ LONG $0xd071e9c5; BYTE $0x08 // vpsrlw xmm2, xmm0, 8
+ LONG $0xc2daf9c5 // vpminub xmm0, xmm0, xmm2
+ LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0
+ LONG $0xc07ef9c5 // vmovd eax, xmm0
+ WORD $0xd0f6 // not al
+ LONG $0x397de3c4; WORD $0x01c8 // vextracti128 xmm0, ymm1, 1
+ LONG $0xc0daf1c5 // vpminub xmm0, xmm1, xmm0
+ LONG $0xd071f1c5; BYTE $0x08 // vpsrlw xmm1, xmm0, 8
+ LONG $0xc1daf9c5 // vpminub xmm0, xmm0, xmm1
+ LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0
+ LONG $0xc67ef9c5 // vmovd esi, xmm0
+ WORD $0x394d; BYTE $0xca // cmp r10, r9
+ JE LBB1_12
+
+LBB1_11:
+ LONG $0x04b60f46; BYTE $0x17 // movzx r8d, byte [rdi + r10]
+ WORD $0x3844; BYTE $0xc6 // cmp sil, r8b
+ LONG $0xf6b60f40 // movzx esi, sil
+ LONG $0xf0430f41 // cmovae esi, r8d
+ WORD $0x3844; BYTE $0xc0 // cmp al, r8b
+ WORD $0xb60f; BYTE $0xc0 // movzx eax, al
+ LONG $0xc0460f41 // cmovbe eax, r8d
+ LONG $0x01c28349 // add r10, 1
+ WORD $0x394d; BYTE $0xd1 // cmp r9, r10
+ JNE LBB1_11
+
+LBB1_12:
+ WORD $0x0188 // mov byte [rcx], al
+ WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil
+ VZEROUPPER
+ RET
+
+LBB1_5:
+ LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0
+ LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2
+ LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB1_9
+ JMP LBB1_10
+
+DATA LCDATA2<>+0x000(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x008(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x010(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x018(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x020(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x028(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x030(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x038(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x040(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x048(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x050(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x058(SB)/8, $0x8000800080008000
+GLOBL LCDATA2<>(SB), 8, $96
+
+TEXT ·_int16_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA2<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB2_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB2_4
+ LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768
+ LONG $0x7fffbe66 // mov si, 32767
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB2_11
+
+LBB2_1:
+ LONG $0x7fffbe66 // mov si, 32767
+ LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768
+ JMP LBB2_12
+
+LBB2_4:
+ WORD $0x8945; BYTE $0xca // mov r10d, r9d
+ LONG $0xe0e28341 // and r10d, -32
+ LONG $0xe0428d49 // lea rax, [r10 - 32]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x05e8c149 // shr r8, 5
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB2_5
+ WORD $0x894c; BYTE $0xc6 // mov rsi, r8
+ LONG $0xfee68348 // and rsi, -2
+ WORD $0xf748; BYTE $0xde // neg rsi
+ LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI2_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0
+ LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1
+
+LBB2_7:
+ LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax]
+ LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32]
+ LONG $0x746ffec5; WORD $0x4047 // vmovdqu ymm6, yword [rdi + 2*rax + 64]
+ LONG $0x7c6ffec5; WORD $0x6047 // vmovdqu ymm7, yword [rdi + 2*rax + 96]
+ LONG $0xc4eafdc5 // vpminsw ymm0, ymm0, ymm4
+ LONG $0xd5eaedc5 // vpminsw ymm2, ymm2, ymm5
+ LONG $0xcceef5c5 // vpmaxsw ymm1, ymm1, ymm4
+ LONG $0xddeee5c5 // vpmaxsw ymm3, ymm3, ymm5
+ LONG $0xc6eafdc5 // vpminsw ymm0, ymm0, ymm6
+ LONG $0xd7eaedc5 // vpminsw ymm2, ymm2, ymm7
+ LONG $0xceeef5c5 // vpmaxsw ymm1, ymm1, ymm6
+ LONG $0xdfeee5c5 // vpmaxsw ymm3, ymm3, ymm7
+ LONG $0x40c08348 // add rax, 64
+ LONG $0x02c68348 // add rsi, 2
+ JNE LBB2_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB2_10
+
+LBB2_9:
+ LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax]
+ LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32]
+ LONG $0xddeee5c5 // vpmaxsw ymm3, ymm3, ymm5
+ LONG $0xcceef5c5 // vpmaxsw ymm1, ymm1, ymm4
+ LONG $0xd5eaedc5 // vpminsw ymm2, ymm2, ymm5
+ LONG $0xc4eafdc5 // vpminsw ymm0, ymm0, ymm4
+
+LBB2_10:
+ LONG $0xcbeef5c5 // vpmaxsw ymm1, ymm1, ymm3
+ LONG $0x397de3c4; WORD $0x01cb // vextracti128 xmm3, ymm1, 1
+ LONG $0xcbeef1c5 // vpmaxsw xmm1, xmm1, xmm3
+ LONG $0x4deff1c5; BYTE $0x40 // vpxor xmm1, xmm1, oword 64[rbp] /* [rip + .LCPI2_2] */
+ LONG $0xc2eafdc5 // vpminsw ymm0, ymm0, ymm2
+ LONG $0x4179e2c4; BYTE $0xc9 // vphminposuw xmm1, xmm1
+ LONG $0x7e79c1c4; BYTE $0xc8 // vmovd r8d, xmm1
+ LONG $0xfff08141; WORD $0x007f; BYTE $0x00 // xor r8d, 32767
+ LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1
+ LONG $0xc1eaf9c5 // vpminsw xmm0, xmm0, xmm1
+ LONG $0x45eff9c5; BYTE $0x50 // vpxor xmm0, xmm0, oword 80[rbp] /* [rip + .LCPI2_3] */
+ LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0
+ LONG $0xc67ef9c5 // vmovd esi, xmm0
+ LONG $0x8000f681; WORD $0x0000 // xor esi, 32768
+ WORD $0x394d; BYTE $0xca // cmp r10, r9
+ JE LBB2_12
+
+LBB2_11:
+ LONG $0x04b70f42; BYTE $0x57 // movzx eax, word [rdi + 2*r10]
+ WORD $0x3966; BYTE $0xc6 // cmp si, ax
+ WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax
+ LONG $0xc0394166 // cmp r8w, ax
+ LONG $0xc04c0f44 // cmovl r8d, eax
+ LONG $0x01c28349 // add r10, 1
+ WORD $0x394d; BYTE $0xd1 // cmp r9, r10
+ JNE LBB2_11
+
+LBB2_12:
+ LONG $0x01894466 // mov word [rcx], r8w
+ WORD $0x8966; BYTE $0x32 // mov word [rdx], si
+ VZEROUPPER
+ RET
+
+LBB2_5:
+ LONG $0x4d6ffdc5; BYTE $0x00 // vmovdqa ymm1, yword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x456ffdc5; BYTE $0x20 // vmovdqa ymm0, yword 32[rbp] /* [rip + .LCPI2_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0
+ LONG $0xd96ffdc5 // vmovdqa ymm3, ymm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB2_9
+ JMP LBB2_10
+
+TEXT ·_uint16_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB3_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB3_4
+ LONG $0xffb84166; BYTE $0xff // mov r8w, -1
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ WORD $0xf631 // xor esi, esi
+ JMP LBB3_11
+
+LBB3_1:
+ LONG $0xffb84166; BYTE $0xff // mov r8w, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB3_12
+
+LBB3_4:
+ WORD $0x8945; BYTE $0xca // mov r10d, r9d
+ LONG $0xe0e28341 // and r10d, -32
+ LONG $0xe0428d49 // lea rax, [r10 - 32]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x05e8c149 // shr r8, 5
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB3_5
+ WORD $0x894c; BYTE $0xc6 // mov rsi, r8
+ LONG $0xfee68348 // and rsi, -2
+ WORD $0xf748; BYTE $0xde // neg rsi
+ LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0
+ LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2
+ LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3
+
+LBB3_7:
+ LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax]
+ LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32]
+ LONG $0x746ffec5; WORD $0x4047 // vmovdqu ymm6, yword [rdi + 2*rax + 64]
+ LONG $0x7c6ffec5; WORD $0x6047 // vmovdqu ymm7, yword [rdi + 2*rax + 96]
+ LONG $0x3a75e2c4; BYTE $0xcc // vpminuw ymm1, ymm1, ymm4
+ LONG $0x3a6de2c4; BYTE $0xd5 // vpminuw ymm2, ymm2, ymm5
+ LONG $0x3e7de2c4; BYTE $0xc4 // vpmaxuw ymm0, ymm0, ymm4
+ LONG $0x3e65e2c4; BYTE $0xdd // vpmaxuw ymm3, ymm3, ymm5
+ LONG $0x3a75e2c4; BYTE $0xce // vpminuw ymm1, ymm1, ymm6
+ LONG $0x3a6de2c4; BYTE $0xd7 // vpminuw ymm2, ymm2, ymm7
+ LONG $0x3e7de2c4; BYTE $0xc6 // vpmaxuw ymm0, ymm0, ymm6
+ LONG $0x3e65e2c4; BYTE $0xdf // vpmaxuw ymm3, ymm3, ymm7
+ LONG $0x40c08348 // add rax, 64
+ LONG $0x02c68348 // add rsi, 2
+ JNE LBB3_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB3_10
+
+LBB3_9:
+ LONG $0x246ffec5; BYTE $0x47 // vmovdqu ymm4, yword [rdi + 2*rax]
+ LONG $0x6c6ffec5; WORD $0x2047 // vmovdqu ymm5, yword [rdi + 2*rax + 32]
+ LONG $0x3e65e2c4; BYTE $0xdd // vpmaxuw ymm3, ymm3, ymm5
+ LONG $0x3e7de2c4; BYTE $0xc4 // vpmaxuw ymm0, ymm0, ymm4
+ LONG $0x3a6de2c4; BYTE $0xd5 // vpminuw ymm2, ymm2, ymm5
+ LONG $0x3a75e2c4; BYTE $0xcc // vpminuw ymm1, ymm1, ymm4
+
+LBB3_10:
+ LONG $0x3a75e2c4; BYTE $0xca // vpminuw ymm1, ymm1, ymm2
+ LONG $0x3e7de2c4; BYTE $0xc3 // vpmaxuw ymm0, ymm0, ymm3
+ LONG $0x397de3c4; WORD $0x01c2 // vextracti128 xmm2, ymm0, 1
+ LONG $0x3e79e2c4; BYTE $0xc2 // vpmaxuw xmm0, xmm0, xmm2
+ LONG $0xd276e9c5 // vpcmpeqd xmm2, xmm2, xmm2
+ LONG $0xc2eff9c5 // vpxor xmm0, xmm0, xmm2
+ LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0
+ LONG $0xc67ef9c5 // vmovd esi, xmm0
+ WORD $0xd6f7 // not esi
+ LONG $0x397de3c4; WORD $0x01c8 // vextracti128 xmm0, ymm1, 1
+ LONG $0x3a71e2c4; BYTE $0xc0 // vpminuw xmm0, xmm1, xmm0
+ LONG $0x4179e2c4; BYTE $0xc0 // vphminposuw xmm0, xmm0
+ LONG $0x7e79c1c4; BYTE $0xc0 // vmovd r8d, xmm0
+ WORD $0x394d; BYTE $0xca // cmp r10, r9
+ JE LBB3_12
+
+LBB3_11:
+ LONG $0x04b70f42; BYTE $0x57 // movzx eax, word [rdi + 2*r10]
+ LONG $0xc0394166 // cmp r8w, ax
+ LONG $0xc0430f44 // cmovae r8d, eax
+ WORD $0x3966; BYTE $0xc6 // cmp si, ax
+ WORD $0x460f; BYTE $0xf0 // cmovbe esi, eax
+ LONG $0x01c28349 // add r10, 1
+ WORD $0x394d; BYTE $0xd1 // cmp r9, r10
+ JNE LBB3_11
+
+LBB3_12:
+ WORD $0x8966; BYTE $0x31 // mov word [rcx], si
+ LONG $0x02894466 // mov word [rdx], r8w
+ VZEROUPPER
+ RET
+
+LBB3_5:
+ LONG $0xc0eff9c5 // vpxor xmm0, xmm0, xmm0
+ LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2
+ LONG $0xdbefe1c5 // vpxor xmm3, xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB3_9
+ JMP LBB3_10
+
+DATA LCDATA3<>+0x000(SB)/8, $0x7fffffff80000000
+GLOBL LCDATA3<>(SB), 8, $8
+
+TEXT ·_int32_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA3<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB4_1
+ WORD $0x8941; BYTE $0xf0 // mov r8d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB4_4
+ LONG $0x0000ba41; WORD $0x8000 // mov r10d, -2147483648
+ LONG $0xffffffb8; BYTE $0x7f // mov eax, 2147483647
+ WORD $0x3145; BYTE $0xc9 // xor r9d, r9d
+ JMP LBB4_7
+
+LBB4_1:
+ LONG $0xffffffb8; BYTE $0x7f // mov eax, 2147483647
+ LONG $0x000000be; BYTE $0x80 // mov esi, -2147483648
+ JMP LBB4_8
+
+LBB4_4:
+ WORD $0x8945; BYTE $0xc1 // mov r9d, r8d
+ LONG $0x587de2c4; WORD $0x0065 // vpbroadcastd ymm4, dword 0[rbp] /* [rip + .LCPI4_0] */
+ LONG $0xe0e18341 // and r9d, -32
+ LONG $0x587de2c4; WORD $0x0445 // vpbroadcastd ymm0, dword 4[rbp] /* [rip + .LCPI4_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xc86ffdc5 // vmovdqa ymm1, ymm0
+ LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0
+ LONG $0xd86ffdc5 // vmovdqa ymm3, ymm0
+ LONG $0xec6ffdc5 // vmovdqa ymm5, ymm4
+ LONG $0xf46ffdc5 // vmovdqa ymm6, ymm4
+ LONG $0xfc6ffdc5 // vmovdqa ymm7, ymm4
+
+LBB4_5:
+ LONG $0x046f7ec5; BYTE $0x87 // vmovdqu ymm8, yword [rdi + 4*rax]
+ LONG $0x4c6f7ec5; WORD $0x2087 // vmovdqu ymm9, yword [rdi + 4*rax + 32]
+ LONG $0x546f7ec5; WORD $0x4087 // vmovdqu ymm10, yword [rdi + 4*rax + 64]
+ LONG $0x5c6f7ec5; WORD $0x6087 // vmovdqu ymm11, yword [rdi + 4*rax + 96]
+ LONG $0x397dc2c4; BYTE $0xc0 // vpminsd ymm0, ymm0, ymm8
+ LONG $0x3975c2c4; BYTE $0xc9 // vpminsd ymm1, ymm1, ymm9
+ LONG $0x396dc2c4; BYTE $0xd2 // vpminsd ymm2, ymm2, ymm10
+ LONG $0x3965c2c4; BYTE $0xdb // vpminsd ymm3, ymm3, ymm11
+ LONG $0x3d5dc2c4; BYTE $0xe0 // vpmaxsd ymm4, ymm4, ymm8
+ LONG $0x3d55c2c4; BYTE $0xe9 // vpmaxsd ymm5, ymm5, ymm9
+ LONG $0x3d4dc2c4; BYTE $0xf2 // vpmaxsd ymm6, ymm6, ymm10
+ LONG $0x3d45c2c4; BYTE $0xfb // vpmaxsd ymm7, ymm7, ymm11
+ LONG $0x20c08348 // add rax, 32
+ WORD $0x3949; BYTE $0xc1 // cmp r9, rax
+ JNE LBB4_5
+ LONG $0x3d5de2c4; BYTE $0xe5 // vpmaxsd ymm4, ymm4, ymm5
+ LONG $0x3d5de2c4; BYTE $0xe6 // vpmaxsd ymm4, ymm4, ymm6
+ LONG $0x3d5de2c4; BYTE $0xe7 // vpmaxsd ymm4, ymm4, ymm7
+ LONG $0x397de3c4; WORD $0x01e5 // vextracti128 xmm5, ymm4, 1
+ LONG $0x3d59e2c4; BYTE $0xe5 // vpmaxsd xmm4, xmm4, xmm5
+ LONG $0xec70f9c5; BYTE $0x4e // vpshufd xmm5, xmm4, 78
+ LONG $0x3d59e2c4; BYTE $0xe5 // vpmaxsd xmm4, xmm4, xmm5
+ LONG $0xec70f9c5; BYTE $0xe5 // vpshufd xmm5, xmm4, 229
+ LONG $0x3d59e2c4; BYTE $0xe5 // vpmaxsd xmm4, xmm4, xmm5
+ LONG $0x7e79c1c4; BYTE $0xe2 // vmovd r10d, xmm4
+ LONG $0x397de2c4; BYTE $0xc1 // vpminsd ymm0, ymm0, ymm1
+ LONG $0x397de2c4; BYTE $0xc2 // vpminsd ymm0, ymm0, ymm2
+ LONG $0x397de2c4; BYTE $0xc3 // vpminsd ymm0, ymm0, ymm3
+ LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1
+ LONG $0x3979e2c4; BYTE $0xc1 // vpminsd xmm0, xmm0, xmm1
+ LONG $0xc870f9c5; BYTE $0x4e // vpshufd xmm1, xmm0, 78
+ LONG $0x3979e2c4; BYTE $0xc1 // vpminsd xmm0, xmm0, xmm1
+ LONG $0xc870f9c5; BYTE $0xe5 // vpshufd xmm1, xmm0, 229
+ LONG $0x3979e2c4; BYTE $0xc1 // vpminsd xmm0, xmm0, xmm1
+ LONG $0xc07ef9c5 // vmovd eax, xmm0
+ WORD $0x8944; BYTE $0xd6 // mov esi, r10d
+ WORD $0x394d; BYTE $0xc1 // cmp r9, r8
+ JE LBB4_8
+
+LBB4_7:
+ LONG $0x8f348b42 // mov esi, dword [rdi + 4*r9]
+ WORD $0xf039 // cmp eax, esi
+ WORD $0x4f0f; BYTE $0xc6 // cmovg eax, esi
+ WORD $0x3941; BYTE $0xf2 // cmp r10d, esi
+ LONG $0xf24d0f41 // cmovge esi, r10d
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8941; BYTE $0xf2 // mov r10d, esi
+ WORD $0x394d; BYTE $0xc8 // cmp r8, r9
+ JNE LBB4_7
+
+LBB4_8:
+ WORD $0x3189 // mov dword [rcx], esi
+ WORD $0x0289 // mov dword [rdx], eax
+ VZEROUPPER
+ RET
+
+TEXT ·_uint32_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB5_1
+ WORD $0x8941; BYTE $0xf0 // mov r8d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB5_4
+ WORD $0x3145; BYTE $0xc9 // xor r9d, r9d
+ LONG $0xffffffb8; BYTE $0xff // mov eax, -1
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB5_7
+
+LBB5_1:
+ LONG $0xffffffb8; BYTE $0xff // mov eax, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB5_8
+
+LBB5_4:
+ WORD $0x8945; BYTE $0xc1 // mov r9d, r8d
+ LONG $0xe0e18341 // and r9d, -32
+ LONG $0xe4efd9c5 // vpxor xmm4, xmm4, xmm4
+ LONG $0xc076fdc5 // vpcmpeqd ymm0, ymm0, ymm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1
+ LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2
+ LONG $0xdb76e5c5 // vpcmpeqd ymm3, ymm3, ymm3
+ LONG $0xedefd1c5 // vpxor xmm5, xmm5, xmm5
+ LONG $0xf6efc9c5 // vpxor xmm6, xmm6, xmm6
+ LONG $0xffefc1c5 // vpxor xmm7, xmm7, xmm7
+
+LBB5_5:
+ LONG $0x046f7ec5; BYTE $0x87 // vmovdqu ymm8, yword [rdi + 4*rax]
+ LONG $0x4c6f7ec5; WORD $0x2087 // vmovdqu ymm9, yword [rdi + 4*rax + 32]
+ LONG $0x546f7ec5; WORD $0x4087 // vmovdqu ymm10, yword [rdi + 4*rax + 64]
+ LONG $0x5c6f7ec5; WORD $0x6087 // vmovdqu ymm11, yword [rdi + 4*rax + 96]
+ LONG $0x3b7dc2c4; BYTE $0xc0 // vpminud ymm0, ymm0, ymm8
+ LONG $0x3b75c2c4; BYTE $0xc9 // vpminud ymm1, ymm1, ymm9
+ LONG $0x3b6dc2c4; BYTE $0xd2 // vpminud ymm2, ymm2, ymm10
+ LONG $0x3b65c2c4; BYTE $0xdb // vpminud ymm3, ymm3, ymm11
+ LONG $0x3f5dc2c4; BYTE $0xe0 // vpmaxud ymm4, ymm4, ymm8
+ LONG $0x3f55c2c4; BYTE $0xe9 // vpmaxud ymm5, ymm5, ymm9
+ LONG $0x3f4dc2c4; BYTE $0xf2 // vpmaxud ymm6, ymm6, ymm10
+ LONG $0x3f45c2c4; BYTE $0xfb // vpmaxud ymm7, ymm7, ymm11
+ LONG $0x20c08348 // add rax, 32
+ WORD $0x3949; BYTE $0xc1 // cmp r9, rax
+ JNE LBB5_5
+ LONG $0x3f5de2c4; BYTE $0xe5 // vpmaxud ymm4, ymm4, ymm5
+ LONG $0x3f5de2c4; BYTE $0xe6 // vpmaxud ymm4, ymm4, ymm6
+ LONG $0x3f5de2c4; BYTE $0xe7 // vpmaxud ymm4, ymm4, ymm7
+ LONG $0x397de3c4; WORD $0x01e5 // vextracti128 xmm5, ymm4, 1
+ LONG $0x3f59e2c4; BYTE $0xe5 // vpmaxud xmm4, xmm4, xmm5
+ LONG $0xec70f9c5; BYTE $0x4e // vpshufd xmm5, xmm4, 78
+ LONG $0x3f59e2c4; BYTE $0xe5 // vpmaxud xmm4, xmm4, xmm5
+ LONG $0xec70f9c5; BYTE $0xe5 // vpshufd xmm5, xmm4, 229
+ LONG $0x3f59e2c4; BYTE $0xe5 // vpmaxud xmm4, xmm4, xmm5
+ LONG $0x7e79c1c4; BYTE $0xe2 // vmovd r10d, xmm4
+ LONG $0x3b7de2c4; BYTE $0xc1 // vpminud ymm0, ymm0, ymm1
+ LONG $0x3b7de2c4; BYTE $0xc2 // vpminud ymm0, ymm0, ymm2
+ LONG $0x3b7de2c4; BYTE $0xc3 // vpminud ymm0, ymm0, ymm3
+ LONG $0x397de3c4; WORD $0x01c1 // vextracti128 xmm1, ymm0, 1
+ LONG $0x3b79e2c4; BYTE $0xc1 // vpminud xmm0, xmm0, xmm1
+ LONG $0xc870f9c5; BYTE $0x4e // vpshufd xmm1, xmm0, 78
+ LONG $0x3b79e2c4; BYTE $0xc1 // vpminud xmm0, xmm0, xmm1
+ LONG $0xc870f9c5; BYTE $0xe5 // vpshufd xmm1, xmm0, 229
+ LONG $0x3b79e2c4; BYTE $0xc1 // vpminud xmm0, xmm0, xmm1
+ LONG $0xc07ef9c5 // vmovd eax, xmm0
+ WORD $0x8944; BYTE $0xd6 // mov esi, r10d
+ WORD $0x394d; BYTE $0xc1 // cmp r9, r8
+ JE LBB5_8
+
+LBB5_7:
+ LONG $0x8f348b42 // mov esi, dword [rdi + 4*r9]
+ WORD $0xf039 // cmp eax, esi
+ WORD $0x430f; BYTE $0xc6 // cmovae eax, esi
+ WORD $0x3941; BYTE $0xf2 // cmp r10d, esi
+ LONG $0xf2470f41 // cmova esi, r10d
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8941; BYTE $0xf2 // mov r10d, esi
+ WORD $0x394d; BYTE $0xc8 // cmp r8, r9
+ JNE LBB5_7
+
+LBB5_8:
+ WORD $0x3189 // mov dword [rcx], esi
+ WORD $0x0289 // mov dword [rdx], eax
+ VZEROUPPER
+ RET
+
+// LCDATA4: seed constants broadcast into the ymm accumulators below.
+// +0x00 = 0x8000000000000000 (MinInt64, seed for the max accumulators)
+// +0x08 = 0x7fffffffffffffff (MaxInt64, seed for the min accumulators)
+DATA LCDATA4<>+0x000(SB)/8, $0x8000000000000000
+DATA LCDATA4<>+0x008(SB)/8, $0x7fffffffffffffff
+GLOBL LCDATA4<>(SB), 8, $16
+
+// func _int64_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+//
+// Stores the minimum of the length int64s at values to *minout (DX)
+// and the maximum to *maxout (CX). For length <= 0 the identity
+// sentinels are stored instead: min = MaxInt64, max = MinInt64 (see
+// LBB6_1). For length > 15 the AVX2 loop LBB6_5 consumes 16 elements
+// per iteration across four min (ymm0-ymm3) and four max (ymm4-ymm7)
+// accumulators, then reduces them horizontally; the scalar loop
+// LBB6_7 finishes the remainder.
+TEXT ·_int64_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA4<>(SB), BP
+
+ QUAD $0xffffffffffffb848; WORD $0x7fff // mov rax, 9223372036854775807
+ WORD $0xf685 // test esi, esi
+ JLE LBB6_1
+ WORD $0x8941; BYTE $0xf0 // mov r8d, esi
+ WORD $0xfe83; BYTE $0x0f // cmp esi, 15
+ JA LBB6_4
+ LONG $0x01508d4c // lea r10, [rax + 1]
+ WORD $0x3145; BYTE $0xc9 // xor r9d, r9d
+ JMP LBB6_7
+
+// Empty input: rax is already MaxInt64 (min); rax+1 wraps to MinInt64 (max).
+LBB6_1:
+ LONG $0x01708d48 // lea rsi, [rax + 1]
+ JMP LBB6_8
+
+LBB6_4:
+ WORD $0x8945; BYTE $0xc1 // mov r9d, r8d
+ LONG $0x597de2c4; WORD $0x0065 // vpbroadcastq ymm4, qword 0[rbp] /* [rip + .LCPI6_0] */
+ LONG $0xf0e18341 // and r9d, -16
+ LONG $0x597de2c4; WORD $0x0845 // vpbroadcastq ymm0, qword 8[rbp] /* [rip + .LCPI6_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd86ffdc5 // vmovdqa ymm3, ymm0
+ LONG $0xd06ffdc5 // vmovdqa ymm2, ymm0
+ LONG $0xc86ffdc5 // vmovdqa ymm1, ymm0
+ LONG $0xfc6ffdc5 // vmovdqa ymm7, ymm4
+ LONG $0xf46ffdc5 // vmovdqa ymm6, ymm4
+ LONG $0xec6ffdc5 // vmovdqa ymm5, ymm4
+
+// Vector main loop: 16 int64s/iteration. There is no 64-bit vpmin/vpmax,
+// so min/max are built from vpcmpgtq compares + vblendvpd selects.
+LBB6_5:
+ LONG $0x046f7ec5; BYTE $0xc7 // vmovdqu ymm8, yword [rdi + 8*rax]
+ LONG $0x373d62c4; BYTE $0xc8 // vpcmpgtq ymm9, ymm8, ymm0
+ LONG $0x4b3de3c4; WORD $0x90c0 // vblendvpd ymm0, ymm8, ymm0, ymm9
+ LONG $0x4c6f7ec5; WORD $0x20c7 // vmovdqu ymm9, yword [rdi + 8*rax + 32]
+ LONG $0x373562c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm9, ymm3
+ LONG $0x4b35e3c4; WORD $0xa0db // vblendvpd ymm3, ymm9, ymm3, ymm10
+ LONG $0x546f7ec5; WORD $0x40c7 // vmovdqu ymm10, yword [rdi + 8*rax + 64]
+ LONG $0x372d62c4; BYTE $0xda // vpcmpgtq ymm11, ymm10, ymm2
+ LONG $0x4b2de3c4; WORD $0xb0d2 // vblendvpd ymm2, ymm10, ymm2, ymm11
+ LONG $0x5c6f7ec5; WORD $0x60c7 // vmovdqu ymm11, yword [rdi + 8*rax + 96]
+ LONG $0x372562c4; BYTE $0xe1 // vpcmpgtq ymm12, ymm11, ymm1
+ LONG $0x4b25e3c4; WORD $0xc0c9 // vblendvpd ymm1, ymm11, ymm1, ymm12
+ LONG $0x375d42c4; BYTE $0xe0 // vpcmpgtq ymm12, ymm4, ymm8
+ LONG $0x4b3de3c4; WORD $0xc0e4 // vblendvpd ymm4, ymm8, ymm4, ymm12
+ LONG $0x374542c4; BYTE $0xc1 // vpcmpgtq ymm8, ymm7, ymm9
+ LONG $0x4b35e3c4; WORD $0x80ff // vblendvpd ymm7, ymm9, ymm7, ymm8
+ LONG $0x374d42c4; BYTE $0xc2 // vpcmpgtq ymm8, ymm6, ymm10
+ LONG $0x4b2de3c4; WORD $0x80f6 // vblendvpd ymm6, ymm10, ymm6, ymm8
+ LONG $0x375542c4; BYTE $0xc3 // vpcmpgtq ymm8, ymm5, ymm11
+ LONG $0x4b25e3c4; WORD $0x80ed // vblendvpd ymm5, ymm11, ymm5, ymm8
+ LONG $0x10c08348 // add rax, 16
+ WORD $0x3949; BYTE $0xc1 // cmp r9, rax
+ JNE LBB6_5
+ LONG $0x375d62c4; BYTE $0xc7 // vpcmpgtq ymm8, ymm4, ymm7
+ LONG $0x4b45e3c4; WORD $0x80e4 // vblendvpd ymm4, ymm7, ymm4, ymm8
+ LONG $0x375de2c4; BYTE $0xfe // vpcmpgtq ymm7, ymm4, ymm6
+ LONG $0x4b4de3c4; WORD $0x70e4 // vblendvpd ymm4, ymm6, ymm4, ymm7
+ LONG $0x375de2c4; BYTE $0xf5 // vpcmpgtq ymm6, ymm4, ymm5
+ LONG $0x4b55e3c4; WORD $0x60e4 // vblendvpd ymm4, ymm5, ymm4, ymm6
+ LONG $0x197de3c4; WORD $0x01e5 // vextractf128 xmm5, ymm4, 1
+ LONG $0x3759e2c4; BYTE $0xf5 // vpcmpgtq xmm6, xmm4, xmm5
+ LONG $0x4b51e3c4; WORD $0x60e4 // vblendvpd xmm4, xmm5, xmm4, xmm6
+ LONG $0x0479e3c4; WORD $0x4eec // vpermilps xmm5, xmm4, 78
+ LONG $0x3759e2c4; BYTE $0xf5 // vpcmpgtq xmm6, xmm4, xmm5
+ LONG $0x4b51e3c4; WORD $0x60e4 // vblendvpd xmm4, xmm5, xmm4, xmm6
+ LONG $0x7ef9c1c4; BYTE $0xe2 // vmovq r10, xmm4
+ LONG $0x3765e2c4; BYTE $0xe0 // vpcmpgtq ymm4, ymm3, ymm0
+ LONG $0x4b65e3c4; WORD $0x40c0 // vblendvpd ymm0, ymm3, ymm0, ymm4
+ LONG $0x376de2c4; BYTE $0xd8 // vpcmpgtq ymm3, ymm2, ymm0
+ LONG $0x4b6de3c4; WORD $0x30c0 // vblendvpd ymm0, ymm2, ymm0, ymm3
+ LONG $0x3775e2c4; BYTE $0xd0 // vpcmpgtq ymm2, ymm1, ymm0
+ LONG $0x4b75e3c4; WORD $0x20c0 // vblendvpd ymm0, ymm1, ymm0, ymm2
+ LONG $0x197de3c4; WORD $0x01c1 // vextractf128 xmm1, ymm0, 1
+ LONG $0x3771e2c4; BYTE $0xd0 // vpcmpgtq xmm2, xmm1, xmm0
+ LONG $0x4b71e3c4; WORD $0x20c0 // vblendvpd xmm0, xmm1, xmm0, xmm2
+ LONG $0x0479e3c4; WORD $0x4ec8 // vpermilps xmm1, xmm0, 78
+ LONG $0x3771e2c4; BYTE $0xd0 // vpcmpgtq xmm2, xmm1, xmm0
+ LONG $0x4b71e3c4; WORD $0x20c0 // vblendvpd xmm0, xmm1, xmm0, xmm2
+ LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq rax, xmm0
+ WORD $0x894c; BYTE $0xd6 // mov rsi, r10
+ WORD $0x394d; BYTE $0xc1 // cmp r9, r8
+ JE LBB6_8
+
+// Scalar tail: rax tracks the min, r10/rsi track the max.
+LBB6_7:
+ LONG $0xcf348b4a // mov rsi, qword [rdi + 8*r9]
+ WORD $0x3948; BYTE $0xf0 // cmp rax, rsi
+ LONG $0xc64f0f48 // cmovg rax, rsi
+ WORD $0x3949; BYTE $0xf2 // cmp r10, rsi
+ LONG $0xf24d0f49 // cmovge rsi, r10
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8949; BYTE $0xf2 // mov r10, rsi
+ WORD $0x394d; BYTE $0xc8 // cmp r8, r9
+ JNE LBB6_7
+
+// Store results: *maxout = rsi, *minout = rax.
+LBB6_8:
+ WORD $0x8948; BYTE $0x31 // mov qword [rcx], rsi
+ WORD $0x8948; BYTE $0x02 // mov qword [rdx], rax
+ VZEROUPPER
+ RET
+
+// LCDATA5: 0x8000000000000000 bias constant. AVX2 has no unsigned
+// 64-bit compare, so both operands are XORed (sign-flipped) with this
+// before each signed vpcmpgtq, turning it into an unsigned comparison.
+DATA LCDATA5<>+0x000(SB)/8, $0x8000000000000000
+GLOBL LCDATA5<>(SB), 8, $8
+
+// func _uint64_max_min_avx2(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+//
+// Stores the minimum of the length uint64s at values to *minout (DX)
+// and the maximum to *maxout (CX). For length <= 0 the identity
+// sentinels are stored: min = MaxUint64 (all ones), max = 0 (LBB7_1).
+// For length > 15 the AVX2 loop LBB7_5 consumes 16 elements per
+// iteration (ymm1-ymm4 seeded all-ones track min, ymm5-ymm8 seeded
+// zero track max), then reduces horizontally; the scalar loop LBB7_7
+// finishes the remainder.
+TEXT ·_uint64_max_min_avx2(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA5<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB7_1
+ WORD $0x8941; BYTE $0xf0 // mov r8d, esi
+ WORD $0xfe83; BYTE $0x0f // cmp esi, 15
+ JA LBB7_4
+ LONG $0xffc0c748; WORD $0xffff; BYTE $0xff // mov rax, -1
+ WORD $0x3145; BYTE $0xc9 // xor r9d, r9d
+ WORD $0x3145; BYTE $0xd2 // xor r10d, r10d
+ JMP LBB7_7
+
+// Empty input: min = all ones (MaxUint64), max = 0.
+LBB7_1:
+ LONG $0xffc0c748; WORD $0xffff; BYTE $0xff // mov rax, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB7_8
+
+LBB7_4:
+ WORD $0x8945; BYTE $0xc1 // mov r9d, r8d
+ LONG $0xf0e18341 // and r9d, -16
+ LONG $0xedefd1c5 // vpxor xmm5, xmm5, xmm5
+ LONG $0xc976f5c5 // vpcmpeqd ymm1, ymm1, ymm1
+ WORD $0xc031 // xor eax, eax
+ LONG $0x597de2c4; WORD $0x0045 // vpbroadcastq ymm0, qword 0[rbp] /* [rip + .LCPI7_0] */
+ LONG $0xe476ddc5 // vpcmpeqd ymm4, ymm4, ymm4
+ LONG $0xdb76e5c5 // vpcmpeqd ymm3, ymm3, ymm3
+ LONG $0xd276edc5 // vpcmpeqd ymm2, ymm2, ymm2
+ LONG $0xef3941c4; BYTE $0xc0 // vpxor xmm8, xmm8, xmm8
+ LONG $0xffefc1c5 // vpxor xmm7, xmm7, xmm7
+ LONG $0xf6efc9c5 // vpxor xmm6, xmm6, xmm6
+
+// Vector main loop: 16 uint64s/iteration; each compare first sign-flips
+// both operands with ymm0 so vpcmpgtq performs an unsigned compare.
+LBB7_5:
+ LONG $0x0c6f7ec5; BYTE $0xc7 // vmovdqu ymm9, yword [rdi + 8*rax]
+ LONG $0xd0ef75c5 // vpxor ymm10, ymm1, ymm0
+ LONG $0xd8ef35c5 // vpxor ymm11, ymm9, ymm0
+ LONG $0x372542c4; BYTE $0xd2 // vpcmpgtq ymm10, ymm11, ymm10
+ LONG $0x4b35e3c4; WORD $0xa0c9 // vblendvpd ymm1, ymm9, ymm1, ymm10
+ LONG $0xd0ef55c5 // vpxor ymm10, ymm5, ymm0
+ LONG $0x372d42c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm10, ymm11
+ LONG $0x4b35e3c4; WORD $0xa0ed // vblendvpd ymm5, ymm9, ymm5, ymm10
+ LONG $0x4c6f7ec5; WORD $0x20c7 // vmovdqu ymm9, yword [rdi + 8*rax + 32]
+ LONG $0xd0ef5dc5 // vpxor ymm10, ymm4, ymm0
+ LONG $0xd8ef35c5 // vpxor ymm11, ymm9, ymm0
+ LONG $0x372542c4; BYTE $0xd2 // vpcmpgtq ymm10, ymm11, ymm10
+ LONG $0x4b35e3c4; WORD $0xa0e4 // vblendvpd ymm4, ymm9, ymm4, ymm10
+ LONG $0xd0ef3dc5 // vpxor ymm10, ymm8, ymm0
+ LONG $0x372d42c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm10, ymm11
+ LONG $0x5c6f7ec5; WORD $0x40c7 // vmovdqu ymm11, yword [rdi + 8*rax + 64]
+ LONG $0x4b3543c4; WORD $0xa0c0 // vblendvpd ymm8, ymm9, ymm8, ymm10
+ LONG $0xc8ef65c5 // vpxor ymm9, ymm3, ymm0
+ LONG $0xd0ef25c5 // vpxor ymm10, ymm11, ymm0
+ LONG $0x372d42c4; BYTE $0xc9 // vpcmpgtq ymm9, ymm10, ymm9
+ LONG $0x4b25e3c4; WORD $0x90db // vblendvpd ymm3, ymm11, ymm3, ymm9
+ LONG $0xc8ef45c5 // vpxor ymm9, ymm7, ymm0
+ LONG $0x373542c4; BYTE $0xca // vpcmpgtq ymm9, ymm9, ymm10
+ LONG $0x4b25e3c4; WORD $0x90ff // vblendvpd ymm7, ymm11, ymm7, ymm9
+ LONG $0x4c6f7ec5; WORD $0x60c7 // vmovdqu ymm9, yword [rdi + 8*rax + 96]
+ LONG $0xd0ef6dc5 // vpxor ymm10, ymm2, ymm0
+ LONG $0xd8ef35c5 // vpxor ymm11, ymm9, ymm0
+ LONG $0x372542c4; BYTE $0xd2 // vpcmpgtq ymm10, ymm11, ymm10
+ LONG $0x4b35e3c4; WORD $0xa0d2 // vblendvpd ymm2, ymm9, ymm2, ymm10
+ LONG $0xd0ef4dc5 // vpxor ymm10, ymm6, ymm0
+ LONG $0x372d42c4; BYTE $0xd3 // vpcmpgtq ymm10, ymm10, ymm11
+ LONG $0x4b35e3c4; WORD $0xa0f6 // vblendvpd ymm6, ymm9, ymm6, ymm10
+ LONG $0x10c08348 // add rax, 16
+ WORD $0x3949; BYTE $0xc1 // cmp r9, rax
+ JNE LBB7_5
+ LONG $0xc8ef3dc5 // vpxor ymm9, ymm8, ymm0
+ LONG $0xd0ef55c5 // vpxor ymm10, ymm5, ymm0
+ LONG $0x372d42c4; BYTE $0xc9 // vpcmpgtq ymm9, ymm10, ymm9
+ LONG $0x4b3de3c4; WORD $0x90ed // vblendvpd ymm5, ymm8, ymm5, ymm9
+ LONG $0xc05755c5 // vxorpd ymm8, ymm5, ymm0
+ LONG $0xc8ef45c5 // vpxor ymm9, ymm7, ymm0
+ LONG $0x373d42c4; BYTE $0xc1 // vpcmpgtq ymm8, ymm8, ymm9
+ LONG $0x4b45e3c4; WORD $0x80ed // vblendvpd ymm5, ymm7, ymm5, ymm8
+ LONG $0xf857d5c5 // vxorpd ymm7, ymm5, ymm0
+ LONG $0xc0ef4dc5 // vpxor ymm8, ymm6, ymm0
+ LONG $0x3745c2c4; BYTE $0xf8 // vpcmpgtq ymm7, ymm7, ymm8
+ LONG $0x4b4de3c4; WORD $0x70ed // vblendvpd ymm5, ymm6, ymm5, ymm7
+ LONG $0x197de3c4; WORD $0x01ee // vextractf128 xmm6, ymm5, 1
+ LONG $0xc05749c5 // vxorpd xmm8, xmm6, xmm0
+ LONG $0xf857d1c5 // vxorpd xmm7, xmm5, xmm0
+ LONG $0x3741c2c4; BYTE $0xf8 // vpcmpgtq xmm7, xmm7, xmm8
+ LONG $0x4b49e3c4; WORD $0x70ed // vblendvpd xmm5, xmm6, xmm5, xmm7
+ LONG $0x0479e3c4; WORD $0x4ef5 // vpermilps xmm6, xmm5, 78
+ LONG $0xc05751c5 // vxorpd xmm8, xmm5, xmm0
+ LONG $0xf857c9c5 // vxorpd xmm7, xmm6, xmm0
+ LONG $0x3739e2c4; BYTE $0xff // vpcmpgtq xmm7, xmm8, xmm7
+ LONG $0x4b49e3c4; WORD $0x70ed // vblendvpd xmm5, xmm6, xmm5, xmm7
+ LONG $0xf0eff5c5 // vpxor ymm6, ymm1, ymm0
+ LONG $0xf8efddc5 // vpxor ymm7, ymm4, ymm0
+ LONG $0x3745e2c4; BYTE $0xf6 // vpcmpgtq ymm6, ymm7, ymm6
+ LONG $0x4b5de3c4; WORD $0x60c9 // vblendvpd ymm1, ymm4, ymm1, ymm6
+ LONG $0xe057f5c5 // vxorpd ymm4, ymm1, ymm0
+ LONG $0xf0efe5c5 // vpxor ymm6, ymm3, ymm0
+ LONG $0x374de2c4; BYTE $0xe4 // vpcmpgtq ymm4, ymm6, ymm4
+ LONG $0x4b65e3c4; WORD $0x40c9 // vblendvpd ymm1, ymm3, ymm1, ymm4
+ LONG $0x7ef9c1c4; BYTE $0xea // vmovq r10, xmm5
+ LONG $0xd857f5c5 // vxorpd ymm3, ymm1, ymm0
+ LONG $0xe0efedc5 // vpxor ymm4, ymm2, ymm0
+ LONG $0x375de2c4; BYTE $0xdb // vpcmpgtq ymm3, ymm4, ymm3
+ LONG $0x4b6de3c4; WORD $0x30c9 // vblendvpd ymm1, ymm2, ymm1, ymm3
+ LONG $0x197de3c4; WORD $0x01ca // vextractf128 xmm2, ymm1, 1
+ LONG $0xd857f1c5 // vxorpd xmm3, xmm1, xmm0
+ LONG $0xe057e9c5 // vxorpd xmm4, xmm2, xmm0
+ LONG $0x3759e2c4; BYTE $0xdb // vpcmpgtq xmm3, xmm4, xmm3
+ LONG $0x4b69e3c4; WORD $0x30c9 // vblendvpd xmm1, xmm2, xmm1, xmm3
+ LONG $0x0479e3c4; WORD $0x4ed1 // vpermilps xmm2, xmm1, 78
+ LONG $0xd857f1c5 // vxorpd xmm3, xmm1, xmm0
+ LONG $0xc057e9c5 // vxorpd xmm0, xmm2, xmm0
+ LONG $0x3779e2c4; BYTE $0xc3 // vpcmpgtq xmm0, xmm0, xmm3
+ LONG $0x4b69e3c4; WORD $0x00c1 // vblendvpd xmm0, xmm2, xmm1, xmm0
+ LONG $0x7ef9e1c4; BYTE $0xc0 // vmovq rax, xmm0
+ WORD $0x894c; BYTE $0xd6 // mov rsi, r10
+ WORD $0x394d; BYTE $0xc1 // cmp r9, r8
+ JE LBB7_8
+
+// Scalar tail (unsigned): rax tracks the min, r10/rsi track the max.
+LBB7_7:
+ LONG $0xcf348b4a // mov rsi, qword [rdi + 8*r9]
+ WORD $0x3948; BYTE $0xf0 // cmp rax, rsi
+ LONG $0xc6430f48 // cmovae rax, rsi
+ WORD $0x3949; BYTE $0xf2 // cmp r10, rsi
+ LONG $0xf2470f49 // cmova rsi, r10
+ LONG $0x01c18349 // add r9, 1
+ WORD $0x8949; BYTE $0xf2 // mov r10, rsi
+ WORD $0x394d; BYTE $0xc8 // cmp r8, r9
+ JNE LBB7_7
+
+// Store results: *maxout = rsi, *minout = rax.
+LBB7_8:
+ WORD $0x8948; BYTE $0x31 // mov qword [rcx], rsi
+ WORD $0x8948; BYTE $0x02 // mov qword [rdx], rax
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go
new file mode 100644
index 000000000..f9d3c44e3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import "unsafe"
+
+// This file contains convenience functions for utilizing Arm64 Neon intrinsics to quickly
+// and efficiently get the min and max from an integral slice.
+
+//go:noescape
+func _int32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// int32MaxMinNEON returns the minimum and maximum of values using the
+// NEON assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func int32MaxMinNEON(values []int32) (min, max int32) {
+ data := unsafe.Pointer(&values[0])
+ _int32_max_min_neon(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _uint32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// uint32MaxMinNEON returns the minimum and maximum of values using the
+// NEON assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func uint32MaxMinNEON(values []uint32) (min, max uint32) {
+ data := unsafe.Pointer(&values[0])
+ _uint32_max_min_neon(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _int64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// int64MaxMinNEON returns the minimum and maximum of values using the
+// NEON assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func int64MaxMinNEON(values []int64) (min, max int64) {
+ data := unsafe.Pointer(&values[0])
+ _int64_max_min_neon(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _uint64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// uint64MaxMinNEON returns the minimum and maximum of values using the
+// NEON assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func uint64MaxMinNEON(values []uint64) (min, max uint64) {
+ data := unsafe.Pointer(&values[0])
+ _uint64_max_min_neon(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s
new file mode 100644
index 000000000..b679bb6e3
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s
@@ -0,0 +1,324 @@
+//+build !noasm !appengine
+
+// ARROW-15336
+// (C2GOASM doesn't work correctly for Arm64)
+// Partly GENERATED BY asm2plan9s.
+
+
+// func _int32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+//
+// Stores min(values[0:length]) to *minout (R2) and the max to *maxout
+// (R3). length < 1 stores the identity sentinels min = MaxInt32,
+// max = MinInt32 (LBB0_3). length > 3 runs the NEON loop LBB0_5
+// (4 int32s/iteration in two smin and two smax 2-lane accumulators)
+// with the scalar tail LBB0_8 finishing the remainder.
+TEXT ·_int32_max_min_neon(SB), $0-32
+
+ MOVD values+0(FP), R0
+ MOVD length+8(FP), R1
+ MOVD minout+16(FP), R2
+ MOVD maxout+24(FP), R3
+
+ WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]!
+ WORD $0x7100043f // cmp w1, #1
+ WORD $0x910003fd // mov x29, sp
+ BLT LBB0_3
+
+ WORD $0x71000c3f // cmp w1, #3
+ WORD $0x2a0103e8 // mov w8, w1
+ BHI LBB0_4
+
+ WORD $0xaa1f03e9 // mov x9, xzr
+ WORD $0x52b0000b // mov w11, #-2147483648
+ WORD $0x12b0000a // mov w10, #2147483647
+ JMP LBB0_7
+// Empty input: store the sentinels and return.
+LBB0_3:
+ WORD $0x12b0000a // mov w10, #2147483647
+ WORD $0x52b0000b // mov w11, #-2147483648
+ WORD $0xb900006b // str w11, [x3]
+ WORD $0xb900004a // str w10, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+LBB0_4:
+ WORD $0x927e7509 // and x9, x8, #0xfffffffc
+ WORD $0x9100200a // add x10, x0, #8
+ WORD $0x0f046402 // movi v2.2s, #128, lsl #24
+ WORD $0x2f046400 // mvni v0.2s, #128, lsl #24
+ WORD $0x2f046401 // mvni v1.2s, #128, lsl #24
+ WORD $0xaa0903eb // mov x11, x9
+ WORD $0x0f046403 // movi v3.2s, #128, lsl #24
+// Vector main loop: v0/v1 accumulate min, v2/v3 accumulate max.
+LBB0_5:
+ WORD $0x6d7f9544 // ldp d4, d5, [x10, #-8]
+ WORD $0xf100116b // subs x11, x11, #4
+ WORD $0x9100414a // add x10, x10, #16
+ WORD $0x0ea46c00 // smin v0.2s, v0.2s, v4.2s
+ WORD $0x0ea56c21 // smin v1.2s, v1.2s, v5.2s
+ WORD $0x0ea46442 // smax v2.2s, v2.2s, v4.2s
+ WORD $0x0ea56463 // smax v3.2s, v3.2s, v5.2s
+ BNE LBB0_5
+
+ WORD $0x0ea36442 // smax v2.2s, v2.2s, v3.2s
+ WORD $0x0ea16c00 // smin v0.2s, v0.2s, v1.2s
+ WORD $0x0e0c0441 // dup v1.2s, v2.s[1]
+ WORD $0x0e0c0403 // dup v3.2s, v0.s[1]
+ WORD $0x0ea16441 // smax v1.2s, v2.2s, v1.2s
+ WORD $0x0ea36c00 // smin v0.2s, v0.2s, v3.2s
+ WORD $0xeb08013f // cmp x9, x8
+ WORD $0x1e26002b // fmov w11, s1
+ WORD $0x1e26000a // fmov w10, s0
+ BEQ LBB0_9
+LBB0_7:
+ WORD $0x8b09080c // add x12, x0, x9, lsl #2
+ WORD $0xcb090108 // sub x8, x8, x9
+// Scalar tail: w10 tracks min, w11 tracks max.
+LBB0_8:
+ WORD $0xb8404589 // ldr w9, [x12], #4
+ WORD $0x6b09015f // cmp w10, w9
+ WORD $0x1a89b14a // csel w10, w10, w9, lt
+ WORD $0x6b09017f // cmp w11, w9
+ WORD $0x1a89c16b // csel w11, w11, w9, gt
+ WORD $0xf1000508 // subs x8, x8, #1
+ BNE LBB0_8
+// Store results: *maxout = w11, *minout = w10.
+LBB0_9:
+ WORD $0xb900006b // str w11, [x3]
+ WORD $0xb900004a // str w10, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+
+// func _uint32_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+//
+// Unsigned variant of _int32_max_min_neon: stores the min of the
+// length uint32s at values to *minout (R2) and the max to *maxout
+// (R3), using umin/umax and lo/hi conditions. length < 1 stores the
+// sentinels min = MaxUint32 (all ones), max = 0 (LBB1_3).
+TEXT ·_uint32_max_min_neon(SB), $0-32
+
+ MOVD values+0(FP), R0
+ MOVD length+8(FP), R1
+ MOVD minout+16(FP), R2
+ MOVD maxout+24(FP), R3
+
+ WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]!
+ WORD $0x7100043f // cmp w1, #1
+ WORD $0x910003fd // mov x29, sp
+ BLT LBB1_3
+
+ WORD $0x71000c3f // cmp w1, #3
+ WORD $0x2a0103e8 // mov w8, w1
+ BHI LBB1_4
+
+ WORD $0xaa1f03e9 // mov x9, xzr
+ WORD $0x2a1f03ea // mov w10, wzr
+ WORD $0x1280000b // mov w11, #-1
+ JMP LBB1_7
+// Empty input: store the sentinels and return.
+LBB1_3:
+ WORD $0x2a1f03ea // mov w10, wzr
+ WORD $0x1280000b // mov w11, #-1
+ WORD $0xb900006a // str w10, [x3]
+ WORD $0xb900004b // str w11, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+LBB1_4:
+ WORD $0x927e7509 // and x9, x8, #0xfffffffc
+ WORD $0x6f00e401 // movi v1.2d, #0000000000000000
+ WORD $0x6f07e7e0 // movi v0.2d, #0xffffffffffffffff
+ WORD $0x9100200a // add x10, x0, #8
+ WORD $0x6f07e7e2 // movi v2.2d, #0xffffffffffffffff
+ WORD $0xaa0903eb // mov x11, x9
+ WORD $0x6f00e403 // movi v3.2d, #0000000000000000
+// Vector main loop: v0/v2 accumulate min, v1/v3 accumulate max.
+LBB1_5:
+ WORD $0x6d7f9544 // ldp d4, d5, [x10, #-8]
+ WORD $0xf100116b // subs x11, x11, #4
+ WORD $0x9100414a // add x10, x10, #16
+ WORD $0x2ea46c00 // umin v0.2s, v0.2s, v4.2s
+ WORD $0x2ea56c42 // umin v2.2s, v2.2s, v5.2s
+ WORD $0x2ea46421 // umax v1.2s, v1.2s, v4.2s
+ WORD $0x2ea56463 // umax v3.2s, v3.2s, v5.2s
+ BNE LBB1_5
+
+ WORD $0x2ea36421 // umax v1.2s, v1.2s, v3.2s
+ WORD $0x2ea26c00 // umin v0.2s, v0.2s, v2.2s
+ WORD $0x0e0c0422 // dup v2.2s, v1.s[1]
+ WORD $0x0e0c0403 // dup v3.2s, v0.s[1]
+ WORD $0x2ea26421 // umax v1.2s, v1.2s, v2.2s
+ WORD $0x2ea36c00 // umin v0.2s, v0.2s, v3.2s
+ WORD $0xeb08013f // cmp x9, x8
+ WORD $0x1e26002a // fmov w10, s1
+ WORD $0x1e26000b // fmov w11, s0
+ BEQ LBB1_9
+LBB1_7:
+ WORD $0x8b09080c // add x12, x0, x9, lsl #2
+ WORD $0xcb090108 // sub x8, x8, x9
+// Scalar tail: w11 tracks min, w10 tracks max (unsigned conditions).
+LBB1_8:
+ WORD $0xb8404589 // ldr w9, [x12], #4
+ WORD $0x6b09017f // cmp w11, w9
+ WORD $0x1a89316b // csel w11, w11, w9, lo
+ WORD $0x6b09015f // cmp w10, w9
+ WORD $0x1a89814a // csel w10, w10, w9, hi
+ WORD $0xf1000508 // subs x8, x8, #1
+ BNE LBB1_8
+// Store results: *maxout = w10, *minout = w11.
+LBB1_9:
+ WORD $0xb900006a // str w10, [x3]
+ WORD $0xb900004b // str w11, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+
+// func _int64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+//
+// Stores the min of the length int64s at values to *minout (R2) and
+// the max to *maxout (R3). NEON has no 64-bit smin/smax, so min/max
+// are built from cmgt compares + bsl selects on 2-lane q registers
+// (4 elements/iteration in LBB2_5). length < 1 stores the sentinels
+// min = MaxInt64, max = MinInt64 (LBB2_3).
+TEXT ·_int64_max_min_neon(SB), $0-32
+
+ MOVD values+0(FP), R0
+ MOVD length+8(FP), R1
+ MOVD minout+16(FP), R2
+ MOVD maxout+24(FP), R3
+
+ WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]!
+ WORD $0x7100043f // cmp w1, #1
+ WORD $0x910003fd // mov x29, sp
+ BLT LBB2_3
+
+ WORD $0x2a0103e8 // mov w8, w1
+ WORD $0xd2f0000b // mov x11, #-9223372036854775808
+ WORD $0x71000c3f // cmp w1, #3
+ WORD $0x92f0000a // mov x10, #9223372036854775807
+ BHI LBB2_4
+
+ WORD $0xaa1f03e9 // mov x9, xzr
+ JMP LBB2_7
+// Empty input: store the sentinels and return.
+LBB2_3:
+ WORD $0x92f0000a // mov x10, #9223372036854775807
+ WORD $0xd2f0000b // mov x11, #-9223372036854775808
+ WORD $0xf900006b // str x11, [x3]
+ WORD $0xf900004a // str x10, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+LBB2_4:
+ WORD $0x927e7509 // and x9, x8, #0xfffffffc
+ WORD $0x4e080d61 // dup v1.2d, x11
+ WORD $0x4e080d40 // dup v0.2d, x10
+ WORD $0x9100400a // add x10, x0, #16
+ WORD $0xaa0903eb // mov x11, x9
+ WORD $0x4ea01c02 // mov v2.16b, v0.16b
+ WORD $0x4ea11c23 // mov v3.16b, v1.16b
+// Vector main loop: 4 int64s/iteration; cmgt + bsl emulate smin/smax.
+LBB2_5:
+ WORD $0xad7f9544 // ldp q4, q5, [x10, #-16]
+ WORD $0x4ea31c66 // mov v6.16b, v3.16b
+ WORD $0x4ea11c27 // mov v7.16b, v1.16b
+ WORD $0x4ea21c43 // mov v3.16b, v2.16b
+ WORD $0x4ea01c01 // mov v1.16b, v0.16b
+ WORD $0x4ee03480 // cmgt v0.2d, v4.2d, v0.2d
+ WORD $0x4ee234a2 // cmgt v2.2d, v5.2d, v2.2d
+ WORD $0x6e641c20 // bsl v0.16b, v1.16b, v4.16b
+ WORD $0x4ee434e1 // cmgt v1.2d, v7.2d, v4.2d
+ WORD $0x6e651c62 // bsl v2.16b, v3.16b, v5.16b
+ WORD $0x4ee534c3 // cmgt v3.2d, v6.2d, v5.2d
+ WORD $0xf100116b // subs x11, x11, #4
+ WORD $0x6e641ce1 // bsl v1.16b, v7.16b, v4.16b
+ WORD $0x6e651cc3 // bsl v3.16b, v6.16b, v5.16b
+ WORD $0x9100814a // add x10, x10, #32
+ BNE LBB2_5
+
+ WORD $0x4ee33424 // cmgt v4.2d, v1.2d, v3.2d
+ WORD $0x4ee03445 // cmgt v5.2d, v2.2d, v0.2d
+ WORD $0x6e631c24 // bsl v4.16b, v1.16b, v3.16b
+ WORD $0x6e621c05 // bsl v5.16b, v0.16b, v2.16b
+ WORD $0x4e180480 // dup v0.2d, v4.d[1]
+ WORD $0x4e1804a1 // dup v1.2d, v5.d[1]
+ WORD $0x4ee03482 // cmgt v2.2d, v4.2d, v0.2d
+ WORD $0x4ee53423 // cmgt v3.2d, v1.2d, v5.2d
+ WORD $0x6e601c82 // bsl v2.16b, v4.16b, v0.16b
+ WORD $0x6e611ca3 // bsl v3.16b, v5.16b, v1.16b
+ WORD $0xeb08013f // cmp x9, x8
+ WORD $0x9e66004b // fmov x11, d2
+ WORD $0x9e66006a // fmov x10, d3
+ BEQ LBB2_9
+LBB2_7:
+ WORD $0x8b090c0c // add x12, x0, x9, lsl #3
+ WORD $0xcb090108 // sub x8, x8, x9
+// Scalar tail: x10 tracks min, x11 tracks max.
+LBB2_8:
+ WORD $0xf8408589 // ldr x9, [x12], #8
+ WORD $0xeb09015f // cmp x10, x9
+ WORD $0x9a89b14a // csel x10, x10, x9, lt
+ WORD $0xeb09017f // cmp x11, x9
+ WORD $0x9a89c16b // csel x11, x11, x9, gt
+ WORD $0xf1000508 // subs x8, x8, #1
+ BNE LBB2_8
+// Store results: *maxout = x11, *minout = x10.
+LBB2_9:
+ WORD $0xf900006b // str x11, [x3]
+ WORD $0xf900004a // str x10, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+
+
+// func _uint64_max_min_neon(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+//
+// Unsigned variant of _int64_max_min_neon: min/max of the length
+// uint64s at values, built from cmhi (unsigned higher) + bsl selects,
+// 4 elements/iteration (LBB3_5). Stores min to *minout (R2) and max
+// to *maxout (R3). length < 1 stores the sentinels min = MaxUint64
+// (all ones), max = 0 (LBB3_3).
+TEXT ·_uint64_max_min_neon(SB), $0-32
+
+ MOVD values+0(FP), R0
+ MOVD length+8(FP), R1
+ MOVD minout+16(FP), R2
+ MOVD maxout+24(FP), R3
+
+ WORD $0xa9bf7bfd // stp x29, x30, [sp, #-16]!
+ WORD $0x7100043f // cmp w1, #1
+ WORD $0x910003fd // mov x29, sp
+ BLT LBB3_3
+
+ WORD $0x71000c3f // cmp w1, #3
+ WORD $0x2a0103e8 // mov w8, w1
+ BHI LBB3_4
+
+ WORD $0xaa1f03e9 // mov x9, xzr
+ WORD $0xaa1f03ea // mov x10, xzr
+ WORD $0x9280000b // mov x11, #-1
+ JMP LBB3_7
+// Empty input: store the sentinels and return.
+LBB3_3:
+ WORD $0xaa1f03ea // mov x10, xzr
+ WORD $0x9280000b // mov x11, #-1
+ WORD $0xf900006a // str x10, [x3]
+ WORD $0xf900004b // str x11, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+LBB3_4:
+ WORD $0x927e7509 // and x9, x8, #0xfffffffc
+ WORD $0x9100400a // add x10, x0, #16
+ WORD $0x6f00e401 // movi v1.2d, #0000000000000000
+ WORD $0x6f07e7e0 // movi v0.2d, #0xffffffffffffffff
+ WORD $0x6f07e7e2 // movi v2.2d, #0xffffffffffffffff
+ WORD $0xaa0903eb // mov x11, x9
+ WORD $0x6f00e403 // movi v3.2d, #0000000000000000
+// Vector main loop: 4 uint64s/iteration; cmhi + bsl emulate umin/umax.
+LBB3_5:
+ WORD $0xad7f9544 // ldp q4, q5, [x10, #-16]
+ WORD $0x4ea31c66 // mov v6.16b, v3.16b
+ WORD $0x4ea11c27 // mov v7.16b, v1.16b
+ WORD $0x4ea21c43 // mov v3.16b, v2.16b
+ WORD $0x4ea01c01 // mov v1.16b, v0.16b
+ WORD $0x6ee03480 // cmhi v0.2d, v4.2d, v0.2d
+ WORD $0x6ee234a2 // cmhi v2.2d, v5.2d, v2.2d
+ WORD $0x6e641c20 // bsl v0.16b, v1.16b, v4.16b
+ WORD $0x6ee434e1 // cmhi v1.2d, v7.2d, v4.2d
+ WORD $0x6e651c62 // bsl v2.16b, v3.16b, v5.16b
+ WORD $0x6ee534c3 // cmhi v3.2d, v6.2d, v5.2d
+ WORD $0xf100116b // subs x11, x11, #4
+ WORD $0x6e641ce1 // bsl v1.16b, v7.16b, v4.16b
+ WORD $0x6e651cc3 // bsl v3.16b, v6.16b, v5.16b
+ WORD $0x9100814a // add x10, x10, #32
+ BNE LBB3_5
+
+ WORD $0x6ee33424 // cmhi v4.2d, v1.2d, v3.2d
+ WORD $0x6ee03445 // cmhi v5.2d, v2.2d, v0.2d
+ WORD $0x6e631c24 // bsl v4.16b, v1.16b, v3.16b
+ WORD $0x6e621c05 // bsl v5.16b, v0.16b, v2.16b
+ WORD $0x4e180480 // dup v0.2d, v4.d[1]
+ WORD $0x4e1804a1 // dup v1.2d, v5.d[1]
+ WORD $0x6ee03482 // cmhi v2.2d, v4.2d, v0.2d
+ WORD $0x6ee53423 // cmhi v3.2d, v1.2d, v5.2d
+ WORD $0x6e601c82 // bsl v2.16b, v4.16b, v0.16b
+ WORD $0x6e611ca3 // bsl v3.16b, v5.16b, v1.16b
+ WORD $0xeb08013f // cmp x9, x8
+ WORD $0x9e66004a // fmov x10, d2
+ WORD $0x9e66006b // fmov x11, d3
+ BEQ LBB3_9
+LBB3_7:
+ WORD $0x8b090c0c // add x12, x0, x9, lsl #3
+ WORD $0xcb090108 // sub x8, x8, x9
+// Scalar tail: x11 tracks min, x10 tracks max (unsigned conditions).
+LBB3_8:
+ WORD $0xf8408589 // ldr x9, [x12], #8
+ WORD $0xeb09017f // cmp x11, x9
+ WORD $0x9a89316b // csel x11, x11, x9, lo
+ WORD $0xeb09015f // cmp x10, x9
+ WORD $0x9a89814a // csel x10, x10, x9, hi
+ WORD $0xf1000508 // subs x8, x8, #1
+ BNE LBB3_8
+// Store results: *maxout = x10, *minout = x11.
+LBB3_9:
+ WORD $0xf900006a // str x10, [x3]
+ WORD $0xf900004b // str x11, [x2]
+ WORD $0xa8c17bfd // ldp x29, x30, [sp], #16
+ RET
+
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go
new file mode 100644
index 000000000..19c24b590
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go
@@ -0,0 +1,31 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build noasm
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+// for every integer width. minmaxFuncs is the package-level dispatch
+// table (presumably declared in this package's min_max.go — not
+// visible in this file).
+func init() {
+ minmaxFuncs.i8 = int8MinMax
+ minmaxFuncs.ui8 = uint8MinMax
+ minmaxFuncs.i16 = int16MinMax
+ minmaxFuncs.ui16 = uint16MinMax
+ minmaxFuncs.i32 = int32MinMax
+ minmaxFuncs.ui32 = uint32MinMax
+ minmaxFuncs.i64 = int64MinMax
+ minmaxFuncs.ui64 = uint64MinMax
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go
new file mode 100644
index 000000000..ffd2db006
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+// No SIMD kernels are provided for this architecture (ppc64le, per the
+// file name), so even in the default !noasm build the portable Go
+// implementations are registered for every integer width. minmaxFuncs
+// is the package-level dispatch table (declared elsewhere in this
+// package).
+func init() {
+ minmaxFuncs.i8 = int8MinMax
+ minmaxFuncs.ui8 = uint8MinMax
+ minmaxFuncs.i16 = int16MinMax
+ minmaxFuncs.ui16 = uint16MinMax
+ minmaxFuncs.i32 = int32MinMax
+ minmaxFuncs.ui32 = uint32MinMax
+ minmaxFuncs.i64 = int64MinMax
+ minmaxFuncs.ui64 = uint64MinMax
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go
new file mode 100644
index 000000000..ffd2db006
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+// No SIMD kernels are provided for this architecture (s390x, per the
+// file name), so even in the default !noasm build the portable Go
+// implementations are registered for every integer width. minmaxFuncs
+// is the package-level dispatch table (declared elsewhere in this
+// package).
+func init() {
+ minmaxFuncs.i8 = int8MinMax
+ minmaxFuncs.ui8 = uint8MinMax
+ minmaxFuncs.i16 = int16MinMax
+ minmaxFuncs.ui16 = uint16MinMax
+ minmaxFuncs.i32 = int32MinMax
+ minmaxFuncs.ui32 = uint32MinMax
+ minmaxFuncs.i64 = int64MinMax
+ minmaxFuncs.ui64 = uint64MinMax
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go
new file mode 100644
index 000000000..1e12a8d17
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import "unsafe"
+
+// This file contains convenience functions for utilizing SSE4 intrinsics to quickly
+// and efficiently get the min and max from an integral slice.
+
+//go:noescape
+func _int8_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// int8MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func int8MaxMinSSE4(values []int8) (min, max int8) {
+ data := unsafe.Pointer(&values[0])
+ _int8_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _uint8_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// uint8MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func uint8MaxMinSSE4(values []uint8) (min, max uint8) {
+ data := unsafe.Pointer(&values[0])
+ _uint8_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _int16_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// int16MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func int16MaxMinSSE4(values []int16) (min, max int16) {
+ data := unsafe.Pointer(&values[0])
+ _int16_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _uint16_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// uint16MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func uint16MaxMinSSE4(values []uint16) (min, max uint16) {
+ data := unsafe.Pointer(&values[0])
+ _uint16_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _int32_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// int32MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func int32MaxMinSSE4(values []int32) (min, max int32) {
+ data := unsafe.Pointer(&values[0])
+ _int32_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _uint32_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// uint32MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func uint32MaxMinSSE4(values []uint32) (min, max uint32) {
+ data := unsafe.Pointer(&values[0])
+ _uint32_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _int64_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// int64MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func int64MaxMinSSE4(values []int64) (min, max int64) {
+ data := unsafe.Pointer(&values[0])
+ _int64_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
+
+//go:noescape
+func _uint64_max_min_sse4(values unsafe.Pointer, length int, minout, maxout unsafe.Pointer)
+
+// uint64MaxMinSSE4 returns the minimum and maximum of values using the
+// SSE4 assembly kernel. NOTE(review): &values[0] panics on an empty
+// slice — callers must guarantee len(values) > 0.
+func uint64MaxMinSSE4(values []uint64) (min, max uint64) {
+ data := unsafe.Pointer(&values[0])
+ _uint64_max_min_sse4(data, len(values), unsafe.Pointer(&min), unsafe.Pointer(&max))
+ return min, max
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s
new file mode 100644
index 000000000..8f1eccf60
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s
@@ -0,0 +1,1044 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+DATA LCDATA1<>+0x000(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x008(SB)/8, $0x8080808080808080
+DATA LCDATA1<>+0x010(SB)/8, $0x7f7f7f7f7f7f7f7f
+DATA LCDATA1<>+0x018(SB)/8, $0x7f7f7f7f7f7f7f7f
+GLOBL LCDATA1<>(SB), 8, $32
+
+TEXT ·_int8_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA1<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB0_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB0_4
+ WORD $0xb041; BYTE $0x80 // mov r8b, -128
+ WORD $0xb640; BYTE $0x7f // mov sil, 127
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB0_11
+
+LBB0_1:
+ WORD $0xb640; BYTE $0x7f // mov sil, 127
+ WORD $0xb041; BYTE $0x80 // mov r8b, -128
+ JMP LBB0_12
+
+LBB0_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xe0e38341 // and r11d, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x05e8c149 // shr r8, 5
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB0_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI0_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+
+LBB0_7:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0x746f0ff3; WORD $0x2007 // movdqu xmm6, oword [rdi + rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3007 // movdqu xmm7, oword [rdi + rax + 48]
+ LONG $0x38380f66; BYTE $0xc4 // pminsb xmm0, xmm4
+ LONG $0x38380f66; BYTE $0xd5 // pminsb xmm2, xmm5
+ LONG $0x3c380f66; BYTE $0xcc // pmaxsb xmm1, xmm4
+ LONG $0x3c380f66; BYTE $0xdd // pmaxsb xmm3, xmm5
+ LONG $0x38380f66; BYTE $0xc6 // pminsb xmm0, xmm6
+ LONG $0x38380f66; BYTE $0xd7 // pminsb xmm2, xmm7
+ LONG $0x3c380f66; BYTE $0xce // pmaxsb xmm1, xmm6
+ LONG $0x3c380f66; BYTE $0xdf // pmaxsb xmm3, xmm7
+ LONG $0x40c08348 // add rax, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB0_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB0_10
+
+LBB0_9:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0x3c380f66; BYTE $0xdd // pmaxsb xmm3, xmm5
+ LONG $0x3c380f66; BYTE $0xcc // pmaxsb xmm1, xmm4
+ LONG $0x38380f66; BYTE $0xd5 // pminsb xmm2, xmm5
+ LONG $0x38380f66; BYTE $0xc4 // pminsb xmm0, xmm4
+
+LBB0_10:
+ LONG $0x38380f66; BYTE $0xc2 // pminsb xmm0, xmm2
+ LONG $0x3c380f66; BYTE $0xcb // pmaxsb xmm1, xmm3
+ LONG $0x4def0f66; BYTE $0x10 // pxor xmm1, oword 16[rbp] /* [rip + .LCPI0_1] */
+ LONG $0xd16f0f66 // movdqa xmm2, xmm1
+ LONG $0xd2710f66; BYTE $0x08 // psrlw xmm2, 8
+ LONG $0xd1da0f66 // pminub xmm2, xmm1
+ LONG $0x41380f66; BYTE $0xca // phminposuw xmm1, xmm2
+ LONG $0x7e0f4166; BYTE $0xc8 // movd r8d, xmm1
+ LONG $0x7ff08041 // xor r8b, 127
+ LONG $0x45ef0f66; BYTE $0x00 // pxor xmm0, oword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0xc86f0f66 // movdqa xmm1, xmm0
+ LONG $0xd1710f66; BYTE $0x08 // psrlw xmm1, 8
+ LONG $0xc8da0f66 // pminub xmm1, xmm0
+ LONG $0x41380f66; BYTE $0xc1 // phminposuw xmm0, xmm1
+ LONG $0xc67e0f66 // movd esi, xmm0
+ LONG $0x80f68040 // xor sil, -128
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB0_12
+
+LBB0_11:
+ LONG $0x04b60f42; BYTE $0x1f // movzx eax, byte [rdi + r11]
+ WORD $0x3840; BYTE $0xc6 // cmp sil, al
+ LONG $0xf6b60f40 // movzx esi, sil
+ WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax
+ WORD $0x3841; BYTE $0xc0 // cmp r8b, al
+ LONG $0xc0b60f45 // movzx r8d, r8b
+ LONG $0xc04c0f44 // cmovl r8d, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB0_11
+
+LBB0_12:
+ WORD $0x8844; BYTE $0x01 // mov byte [rcx], r8b
+ WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil
+ RET
+
+LBB0_5:
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI0_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI0_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB0_9
+ JMP LBB0_10
+
+TEXT ·_uint8_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB1_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x1f // cmp esi, 31
+ JA LBB1_4
+ WORD $0xb640; BYTE $0xff // mov sil, -1
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0xc031 // xor eax, eax
+ JMP LBB1_11
+
+LBB1_1:
+ WORD $0xb640; BYTE $0xff // mov sil, -1
+ WORD $0xc031 // xor eax, eax
+ JMP LBB1_12
+
+LBB1_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xe0e38341 // and r11d, -32
+ LONG $0xe0438d49 // lea rax, [r11 - 32]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x05e8c149 // shr r8, 5
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB1_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+
+LBB1_7:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0x746f0ff3; WORD $0x2007 // movdqu xmm6, oword [rdi + rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3007 // movdqu xmm7, oword [rdi + rax + 48]
+ LONG $0xc4da0f66 // pminub xmm0, xmm4
+ LONG $0xd5da0f66 // pminub xmm2, xmm5
+ LONG $0xccde0f66 // pmaxub xmm1, xmm4
+ LONG $0xddde0f66 // pmaxub xmm3, xmm5
+ LONG $0xc6da0f66 // pminub xmm0, xmm6
+ LONG $0xd7da0f66 // pminub xmm2, xmm7
+ LONG $0xcede0f66 // pmaxub xmm1, xmm6
+ LONG $0xdfde0f66 // pmaxub xmm3, xmm7
+ LONG $0x40c08348 // add rax, 64
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB1_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB1_10
+
+LBB1_9:
+ LONG $0x246f0ff3; BYTE $0x07 // movdqu xmm4, oword [rdi + rax]
+ LONG $0x6c6f0ff3; WORD $0x1007 // movdqu xmm5, oword [rdi + rax + 16]
+ LONG $0xddde0f66 // pmaxub xmm3, xmm5
+ LONG $0xccde0f66 // pmaxub xmm1, xmm4
+ LONG $0xd5da0f66 // pminub xmm2, xmm5
+ LONG $0xc4da0f66 // pminub xmm0, xmm4
+
+LBB1_10:
+ LONG $0xc2da0f66 // pminub xmm0, xmm2
+ LONG $0xcbde0f66 // pmaxub xmm1, xmm3
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xd1ef0f66 // pxor xmm2, xmm1
+ LONG $0xca6f0f66 // movdqa xmm1, xmm2
+ LONG $0xd1710f66; BYTE $0x08 // psrlw xmm1, 8
+ LONG $0xcada0f66 // pminub xmm1, xmm2
+ LONG $0x41380f66; BYTE $0xc9 // phminposuw xmm1, xmm1
+ LONG $0xc87e0f66 // movd eax, xmm1
+ WORD $0xd0f6 // not al
+ LONG $0xc86f0f66 // movdqa xmm1, xmm0
+ LONG $0xd1710f66; BYTE $0x08 // psrlw xmm1, 8
+ LONG $0xc8da0f66 // pminub xmm1, xmm0
+ LONG $0x41380f66; BYTE $0xc1 // phminposuw xmm0, xmm1
+ LONG $0xc67e0f66 // movd esi, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB1_12
+
+LBB1_11:
+ LONG $0x04b60f46; BYTE $0x1f // movzx r8d, byte [rdi + r11]
+ WORD $0x3844; BYTE $0xc6 // cmp sil, r8b
+ LONG $0xf6b60f40 // movzx esi, sil
+ LONG $0xf0430f41 // cmovae esi, r8d
+ WORD $0x3844; BYTE $0xc0 // cmp al, r8b
+ WORD $0xb60f; BYTE $0xc0 // movzx eax, al
+ LONG $0xc0460f41 // cmovbe eax, r8d
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB1_11
+
+LBB1_12:
+ WORD $0x0188 // mov byte [rcx], al
+ WORD $0x8840; BYTE $0x32 // mov byte [rdx], sil
+ RET
+
+LBB1_5:
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB1_9
+ JMP LBB1_10
+
+DATA LCDATA2<>+0x000(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x008(SB)/8, $0x8000800080008000
+DATA LCDATA2<>+0x010(SB)/8, $0x7fff7fff7fff7fff
+DATA LCDATA2<>+0x018(SB)/8, $0x7fff7fff7fff7fff
+GLOBL LCDATA2<>(SB), 8, $32
+
+TEXT ·_int16_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA2<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB2_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x0f // cmp esi, 15
+ JA LBB2_4
+ LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768
+ LONG $0x7fffbe66 // mov si, 32767
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB2_11
+
+LBB2_1:
+ LONG $0x7fffbe66 // mov si, 32767
+ LONG $0x00b84166; BYTE $0x80 // mov r8w, -32768
+ JMP LBB2_12
+
+LBB2_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf0e38341 // and r11d, -16
+ LONG $0xf0438d49 // lea rax, [r11 - 16]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x04e8c149 // shr r8, 4
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB2_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI2_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+
+LBB2_7:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2047 // movdqu xmm6, oword [rdi + 2*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3047 // movdqu xmm7, oword [rdi + 2*rax + 48]
+ LONG $0xc4ea0f66 // pminsw xmm0, xmm4
+ LONG $0xd5ea0f66 // pminsw xmm2, xmm5
+ LONG $0xccee0f66 // pmaxsw xmm1, xmm4
+ LONG $0xddee0f66 // pmaxsw xmm3, xmm5
+ LONG $0xc6ea0f66 // pminsw xmm0, xmm6
+ LONG $0xd7ea0f66 // pminsw xmm2, xmm7
+ LONG $0xceee0f66 // pmaxsw xmm1, xmm6
+ LONG $0xdfee0f66 // pmaxsw xmm3, xmm7
+ LONG $0x20c08348 // add rax, 32
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB2_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB2_10
+
+LBB2_9:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0xddee0f66 // pmaxsw xmm3, xmm5
+ LONG $0xccee0f66 // pmaxsw xmm1, xmm4
+ LONG $0xd5ea0f66 // pminsw xmm2, xmm5
+ LONG $0xc4ea0f66 // pminsw xmm0, xmm4
+
+LBB2_10:
+ LONG $0xc2ea0f66 // pminsw xmm0, xmm2
+ LONG $0xcbee0f66 // pmaxsw xmm1, xmm3
+ LONG $0x4def0f66; BYTE $0x10 // pxor xmm1, oword 16[rbp] /* [rip + .LCPI2_1] */
+ LONG $0x41380f66; BYTE $0xc9 // phminposuw xmm1, xmm1
+ LONG $0x7e0f4166; BYTE $0xc8 // movd r8d, xmm1
+ LONG $0xfff08141; WORD $0x007f; BYTE $0x00 // xor r8d, 32767
+ LONG $0x45ef0f66; BYTE $0x00 // pxor xmm0, oword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x41380f66; BYTE $0xc0 // phminposuw xmm0, xmm0
+ LONG $0xc67e0f66 // movd esi, xmm0
+ LONG $0x8000f681; WORD $0x0000 // xor esi, 32768
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB2_12
+
+LBB2_11:
+ LONG $0x04b70f42; BYTE $0x5f // movzx eax, word [rdi + 2*r11]
+ WORD $0x3966; BYTE $0xc6 // cmp si, ax
+ WORD $0x4f0f; BYTE $0xf0 // cmovg esi, eax
+ LONG $0xc0394166 // cmp r8w, ax
+ LONG $0xc04c0f44 // cmovl r8d, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB2_11
+
+LBB2_12:
+ LONG $0x01894466 // mov word [rcx], r8w
+ WORD $0x8966; BYTE $0x32 // mov word [rdx], si
+ RET
+
+LBB2_5:
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI2_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI2_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB2_9
+ JMP LBB2_10
+
+TEXT ·_uint16_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB3_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x0f // cmp esi, 15
+ JA LBB3_4
+ LONG $0xffb84166; BYTE $0xff // mov r8w, -1
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0xf631 // xor esi, esi
+ JMP LBB3_11
+
+LBB3_1:
+ LONG $0xffb84166; BYTE $0xff // mov r8w, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB3_12
+
+LBB3_4:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf0e38341 // and r11d, -16
+ LONG $0xf0438d49 // lea rax, [r11 - 16]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x04e8c149 // shr r8, 4
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB3_5
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+
+LBB3_7:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2047 // movdqu xmm6, oword [rdi + 2*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3047 // movdqu xmm7, oword [rdi + 2*rax + 48]
+ LONG $0x3a380f66; BYTE $0xc4 // pminuw xmm0, xmm4
+ LONG $0x3a380f66; BYTE $0xd5 // pminuw xmm2, xmm5
+ LONG $0x3e380f66; BYTE $0xcc // pmaxuw xmm1, xmm4
+ LONG $0x3e380f66; BYTE $0xdd // pmaxuw xmm3, xmm5
+ LONG $0x3a380f66; BYTE $0xc6 // pminuw xmm0, xmm6
+ LONG $0x3a380f66; BYTE $0xd7 // pminuw xmm2, xmm7
+ LONG $0x3e380f66; BYTE $0xce // pmaxuw xmm1, xmm6
+ LONG $0x3e380f66; BYTE $0xdf // pmaxuw xmm3, xmm7
+ LONG $0x20c08348 // add rax, 32
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB3_7
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB3_10
+
+LBB3_9:
+ LONG $0x246f0ff3; BYTE $0x47 // movdqu xmm4, oword [rdi + 2*rax]
+ LONG $0x6c6f0ff3; WORD $0x1047 // movdqu xmm5, oword [rdi + 2*rax + 16]
+ LONG $0x3e380f66; BYTE $0xdd // pmaxuw xmm3, xmm5
+ LONG $0x3e380f66; BYTE $0xcc // pmaxuw xmm1, xmm4
+ LONG $0x3a380f66; BYTE $0xd5 // pminuw xmm2, xmm5
+ LONG $0x3a380f66; BYTE $0xc4 // pminuw xmm0, xmm4
+
+LBB3_10:
+ LONG $0x3a380f66; BYTE $0xc2 // pminuw xmm0, xmm2
+ LONG $0x3e380f66; BYTE $0xcb // pmaxuw xmm1, xmm3
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xd1ef0f66 // pxor xmm2, xmm1
+ LONG $0x41380f66; BYTE $0xca // phminposuw xmm1, xmm2
+ LONG $0xce7e0f66 // movd esi, xmm1
+ WORD $0xd6f7 // not esi
+ LONG $0x41380f66; BYTE $0xc0 // phminposuw xmm0, xmm0
+ LONG $0x7e0f4166; BYTE $0xc0 // movd r8d, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB3_12
+
+LBB3_11:
+ LONG $0x04b70f42; BYTE $0x5f // movzx eax, word [rdi + 2*r11]
+ LONG $0xc0394166 // cmp r8w, ax
+ LONG $0xc0430f44 // cmovae r8d, eax
+ WORD $0x3966; BYTE $0xc6 // cmp si, ax
+ WORD $0x460f; BYTE $0xf0 // cmovbe esi, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB3_11
+
+LBB3_12:
+ WORD $0x8966; BYTE $0x31 // mov word [rcx], si
+ LONG $0x02894466 // mov word [rdx], r8w
+ RET
+
+LBB3_5:
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB3_9
+ JMP LBB3_10
+
+DATA LCDATA3<>+0x000(SB)/8, $0x8000000080000000
+DATA LCDATA3<>+0x008(SB)/8, $0x8000000080000000
+DATA LCDATA3<>+0x010(SB)/8, $0x7fffffff7fffffff
+DATA LCDATA3<>+0x018(SB)/8, $0x7fffffff7fffffff
+GLOBL LCDATA3<>(SB), 8, $32
+
+TEXT ·_int32_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA3<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB4_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x07 // cmp esi, 7
+ JA LBB4_6
+ LONG $0x000000b8; BYTE $0x80 // mov eax, -2147483648
+ LONG $0xffffb841; WORD $0x7fff // mov r8d, 2147483647
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB4_4
+
+LBB4_1:
+ LONG $0xffffb841; WORD $0x7fff // mov r8d, 2147483647
+ LONG $0x000000b8; BYTE $0x80 // mov eax, -2147483648
+ JMP LBB4_13
+
+LBB4_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf8e38341 // and r11d, -8
+ LONG $0xf8438d49 // lea rax, [r11 - 8]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x03e8c149 // shr r8, 3
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB4_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI4_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI4_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+
+LBB4_9:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2087 // movdqu xmm6, oword [rdi + 4*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3087 // movdqu xmm7, oword [rdi + 4*rax + 48]
+ LONG $0x39380f66; BYTE $0xc4 // pminsd xmm0, xmm4
+ LONG $0x39380f66; BYTE $0xd5 // pminsd xmm2, xmm5
+ LONG $0x3d380f66; BYTE $0xcc // pmaxsd xmm1, xmm4
+ LONG $0x3d380f66; BYTE $0xdd // pmaxsd xmm3, xmm5
+ LONG $0x39380f66; BYTE $0xc6 // pminsd xmm0, xmm6
+ LONG $0x39380f66; BYTE $0xd7 // pminsd xmm2, xmm7
+ LONG $0x3d380f66; BYTE $0xce // pmaxsd xmm1, xmm6
+ LONG $0x3d380f66; BYTE $0xdf // pmaxsd xmm3, xmm7
+ LONG $0x10c08348 // add rax, 16
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB4_9
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB4_12
+
+LBB4_11:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x3d380f66; BYTE $0xdd // pmaxsd xmm3, xmm5
+ LONG $0x3d380f66; BYTE $0xcc // pmaxsd xmm1, xmm4
+ LONG $0x39380f66; BYTE $0xd5 // pminsd xmm2, xmm5
+ LONG $0x39380f66; BYTE $0xc4 // pminsd xmm0, xmm4
+
+LBB4_12:
+ LONG $0x39380f66; BYTE $0xc2 // pminsd xmm0, xmm2
+ LONG $0x3d380f66; BYTE $0xcb // pmaxsd xmm1, xmm3
+ LONG $0xd1700f66; BYTE $0x4e // pshufd xmm2, xmm1, 78
+ LONG $0x3d380f66; BYTE $0xd1 // pmaxsd xmm2, xmm1
+ LONG $0xca700f66; BYTE $0xe5 // pshufd xmm1, xmm2, 229
+ LONG $0x3d380f66; BYTE $0xca // pmaxsd xmm1, xmm2
+ LONG $0xc87e0f66 // movd eax, xmm1
+ LONG $0xc8700f66; BYTE $0x4e // pshufd xmm1, xmm0, 78
+ LONG $0x39380f66; BYTE $0xc8 // pminsd xmm1, xmm0
+ LONG $0xc1700f66; BYTE $0xe5 // pshufd xmm0, xmm1, 229
+ LONG $0x39380f66; BYTE $0xc1 // pminsd xmm0, xmm1
+ LONG $0x7e0f4166; BYTE $0xc0 // movd r8d, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB4_13
+
+LBB4_4:
+ WORD $0xc689 // mov esi, eax
+
+LBB4_5:
+ LONG $0x9f048b42 // mov eax, dword [rdi + 4*r11]
+ WORD $0x3941; BYTE $0xc0 // cmp r8d, eax
+ LONG $0xc04f0f44 // cmovg r8d, eax
+ WORD $0xc639 // cmp esi, eax
+ WORD $0x4d0f; BYTE $0xc6 // cmovge eax, esi
+ LONG $0x01c38349 // add r11, 1
+ WORD $0xc689 // mov esi, eax
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB4_5
+
+LBB4_13:
+ WORD $0x0189 // mov dword [rcx], eax
+ WORD $0x8944; BYTE $0x02 // mov dword [rdx], r8d
+ RET
+
+LBB4_7:
+ LONG $0x4d6f0f66; BYTE $0x00 // movdqa xmm1, oword 0[rbp] /* [rip + .LCPI4_0] */
+ LONG $0x456f0f66; BYTE $0x10 // movdqa xmm0, oword 16[rbp] /* [rip + .LCPI4_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd06f0f66 // movdqa xmm2, xmm0
+ LONG $0xd96f0f66 // movdqa xmm3, xmm1
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB4_11
+ JMP LBB4_12
+
+TEXT ·_uint32_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB5_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x07 // cmp esi, 7
+ JA LBB5_6
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ LONG $0xffffb841; WORD $0xffff // mov r8d, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB5_4
+
+LBB5_1:
+ LONG $0xffffb841; WORD $0xffff // mov r8d, -1
+ WORD $0xf631 // xor esi, esi
+ JMP LBB5_13
+
+LBB5_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xf8e38341 // and r11d, -8
+ LONG $0xf8438d49 // lea rax, [r11 - 8]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x03e8c149 // shr r8, 3
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB5_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+
+LBB5_9:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x746f0ff3; WORD $0x2087 // movdqu xmm6, oword [rdi + 4*rax + 32]
+ LONG $0x7c6f0ff3; WORD $0x3087 // movdqu xmm7, oword [rdi + 4*rax + 48]
+ LONG $0x3b380f66; BYTE $0xc4 // pminud xmm0, xmm4
+ LONG $0x3b380f66; BYTE $0xd5 // pminud xmm2, xmm5
+ LONG $0x3f380f66; BYTE $0xcc // pmaxud xmm1, xmm4
+ LONG $0x3f380f66; BYTE $0xdd // pmaxud xmm3, xmm5
+ LONG $0x3b380f66; BYTE $0xc6 // pminud xmm0, xmm6
+ LONG $0x3b380f66; BYTE $0xd7 // pminud xmm2, xmm7
+ LONG $0x3f380f66; BYTE $0xce // pmaxud xmm1, xmm6
+ LONG $0x3f380f66; BYTE $0xdf // pmaxud xmm3, xmm7
+ LONG $0x10c08348 // add rax, 16
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB5_9
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB5_12
+
+LBB5_11:
+ LONG $0x246f0ff3; BYTE $0x87 // movdqu xmm4, oword [rdi + 4*rax]
+ LONG $0x6c6f0ff3; WORD $0x1087 // movdqu xmm5, oword [rdi + 4*rax + 16]
+ LONG $0x3f380f66; BYTE $0xdd // pmaxud xmm3, xmm5
+ LONG $0x3f380f66; BYTE $0xcc // pmaxud xmm1, xmm4
+ LONG $0x3b380f66; BYTE $0xd5 // pminud xmm2, xmm5
+ LONG $0x3b380f66; BYTE $0xc4 // pminud xmm0, xmm4
+
+LBB5_12:
+ LONG $0x3b380f66; BYTE $0xc2 // pminud xmm0, xmm2
+ LONG $0x3f380f66; BYTE $0xcb // pmaxud xmm1, xmm3
+ LONG $0xd1700f66; BYTE $0x4e // pshufd xmm2, xmm1, 78
+ LONG $0x3f380f66; BYTE $0xd1 // pmaxud xmm2, xmm1
+ LONG $0xca700f66; BYTE $0xe5 // pshufd xmm1, xmm2, 229
+ LONG $0x3f380f66; BYTE $0xca // pmaxud xmm1, xmm2
+ LONG $0xce7e0f66 // movd esi, xmm1
+ LONG $0xc8700f66; BYTE $0x4e // pshufd xmm1, xmm0, 78
+ LONG $0x3b380f66; BYTE $0xc8 // pminud xmm1, xmm0
+ LONG $0xc1700f66; BYTE $0xe5 // pshufd xmm0, xmm1, 229
+ LONG $0x3b380f66; BYTE $0xc1 // pminud xmm0, xmm1
+ LONG $0x7e0f4166; BYTE $0xc0 // movd r8d, xmm0
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB5_13
+
+LBB5_4:
+ WORD $0xf089 // mov eax, esi
+
+LBB5_5:
+ LONG $0x9f348b42 // mov esi, dword [rdi + 4*r11]
+ WORD $0x3941; BYTE $0xf0 // cmp r8d, esi
+ LONG $0xc6430f44 // cmovae r8d, esi
+ WORD $0xf039 // cmp eax, esi
+ WORD $0x470f; BYTE $0xf0 // cmova esi, eax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0xf089 // mov eax, esi
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB5_5
+
+LBB5_13:
+ WORD $0x3189 // mov dword [rcx], esi
+ WORD $0x8944; BYTE $0x02 // mov dword [rdx], r8d
+ RET
+
+LBB5_7:
+ LONG $0xc9ef0f66 // pxor xmm1, xmm1
+ LONG $0xc0760f66 // pcmpeqd xmm0, xmm0
+ WORD $0xc031 // xor eax, eax
+ LONG $0xd2760f66 // pcmpeqd xmm2, xmm2
+ LONG $0xdbef0f66 // pxor xmm3, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB5_11
+ JMP LBB5_12
+
+DATA LCDATA4<>+0x000(SB)/8, $0x8000000000000000
+DATA LCDATA4<>+0x008(SB)/8, $0x8000000000000000
+DATA LCDATA4<>+0x010(SB)/8, $0x7fffffffffffffff
+DATA LCDATA4<>+0x018(SB)/8, $0x7fffffffffffffff
+GLOBL LCDATA4<>(SB), 8, $32
+
+TEXT ·_int64_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA4<>(SB), BP
+
+ QUAD $0xffffffffffffb849; WORD $0x7fff // mov r8, 9223372036854775807
+ WORD $0xf685 // test esi, esi
+ JLE LBB6_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x03 // cmp esi, 3
+ JA LBB6_6
+ LONG $0x01708d49 // lea rsi, [r8 + 1]
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ JMP LBB6_4
+
+LBB6_1:
+ LONG $0x01708d49 // lea rsi, [r8 + 1]
+ JMP LBB6_13
+
+LBB6_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xfce38341 // and r11d, -4
+ LONG $0xfc438d49 // lea rax, [r11 - 4]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x02e8c149 // shr r8, 2
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB6_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0x6f0f4466; WORD $0x004d // movdqa xmm9, oword 0[rbp] /* [rip + .LCPI6_0] */
+ LONG $0x6f0f4466; WORD $0x1045 // movdqa xmm8, oword 16[rbp] /* [rip + .LCPI6_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0x6f0f4166; BYTE $0xd0 // movdqa xmm2, xmm8
+ LONG $0x6f0f4166; BYTE $0xf1 // movdqa xmm6, xmm9
+
+LBB6_9:
+ LONG $0x3c6f0ff3; BYTE $0xc7 // movdqu xmm7, oword [rdi + 8*rax]
+ LONG $0xc76f0f66 // movdqa xmm0, xmm7
+ LONG $0x380f4166; WORD $0xc037 // pcmpgtq xmm0, xmm8
+ LONG $0xe76f0f66 // movdqa xmm4, xmm7
+ LONG $0x380f4166; WORD $0xe015 // blendvpd xmm4, xmm8, xmm0
+ LONG $0x4c6f0ff3; WORD $0x10c7 // movdqu xmm1, oword [rdi + 8*rax + 16]
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0xe96f0f66 // movdqa xmm5, xmm1
+ LONG $0x15380f66; BYTE $0xea // blendvpd xmm5, xmm2, xmm0
+ LONG $0x6f0f4166; BYTE $0xc1 // movdqa xmm0, xmm9
+ LONG $0x37380f66; BYTE $0xc7 // pcmpgtq xmm0, xmm7
+ LONG $0x380f4166; WORD $0xf915 // blendvpd xmm7, xmm9, xmm0
+ LONG $0xc66f0f66 // movdqa xmm0, xmm6
+ LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1
+ LONG $0x15380f66; BYTE $0xce // blendvpd xmm1, xmm6, xmm0
+ LONG $0x5c6f0ff3; WORD $0x20c7 // movdqu xmm3, oword [rdi + 8*rax + 32]
+ LONG $0xc36f0f66 // movdqa xmm0, xmm3
+ LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4
+ LONG $0x6f0f4466; BYTE $0xc3 // movdqa xmm8, xmm3
+ LONG $0x380f4466; WORD $0xc415 // blendvpd xmm8, xmm4, xmm0
+ LONG $0x646f0ff3; WORD $0x30c7 // movdqu xmm4, oword [rdi + 8*rax + 48]
+ LONG $0xc46f0f66 // movdqa xmm0, xmm4
+ LONG $0x37380f66; BYTE $0xc5 // pcmpgtq xmm0, xmm5
+ LONG $0xd46f0f66 // movdqa xmm2, xmm4
+ LONG $0x15380f66; BYTE $0xd5 // blendvpd xmm2, xmm5, xmm0
+ LONG $0xc7280f66 // movapd xmm0, xmm7
+ LONG $0x37380f66; BYTE $0xc3 // pcmpgtq xmm0, xmm3
+ LONG $0x15380f66; BYTE $0xdf // blendvpd xmm3, xmm7, xmm0
+ LONG $0xc1280f66 // movapd xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4
+ LONG $0x15380f66; BYTE $0xe1 // blendvpd xmm4, xmm1, xmm0
+ LONG $0x08c08348 // add rax, 8
+ LONG $0x280f4466; BYTE $0xcb // movapd xmm9, xmm3
+ LONG $0xf4280f66 // movapd xmm6, xmm4
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB6_9
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB6_12
+
+LBB6_11:
+ LONG $0x4c6f0ff3; WORD $0x10c7 // movdqu xmm1, oword [rdi + 8*rax + 16]
+ LONG $0xc4280f66 // movapd xmm0, xmm4
+ LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1
+ LONG $0xe96f0f66 // movdqa xmm5, xmm1
+ LONG $0x15380f66; BYTE $0xec // blendvpd xmm5, xmm4, xmm0
+ LONG $0x246f0ff3; BYTE $0xc7 // movdqu xmm4, oword [rdi + 8*rax]
+ LONG $0xc3280f66 // movapd xmm0, xmm3
+ LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4
+ LONG $0xf46f0f66 // movdqa xmm6, xmm4
+ LONG $0x15380f66; BYTE $0xf3 // blendvpd xmm6, xmm3, xmm0
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0x15380f66; BYTE $0xca // blendvpd xmm1, xmm2, xmm0
+ LONG $0xc46f0f66 // movdqa xmm0, xmm4
+ LONG $0x380f4166; WORD $0xc037 // pcmpgtq xmm0, xmm8
+ LONG $0x380f4166; WORD $0xe015 // blendvpd xmm4, xmm8, xmm0
+ LONG $0x280f4466; BYTE $0xc4 // movapd xmm8, xmm4
+ LONG $0xd1280f66 // movapd xmm2, xmm1
+ LONG $0xde280f66 // movapd xmm3, xmm6
+ LONG $0xe5280f66 // movapd xmm4, xmm5
+
+LBB6_12:
+ LONG $0xc3280f66 // movapd xmm0, xmm3
+ LONG $0x37380f66; BYTE $0xc4 // pcmpgtq xmm0, xmm4
+ LONG $0x15380f66; BYTE $0xe3 // blendvpd xmm4, xmm3, xmm0
+ LONG $0xcc700f66; BYTE $0x4e // pshufd xmm1, xmm4, 78
+ LONG $0xc46f0f66 // movdqa xmm0, xmm4
+ LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1
+ LONG $0x15380f66; BYTE $0xcc // blendvpd xmm1, xmm4, xmm0
+ LONG $0x7e0f4866; BYTE $0xce // movq rsi, xmm1
+ LONG $0xc26f0f66 // movdqa xmm0, xmm2
+ LONG $0x380f4166; WORD $0xc037 // pcmpgtq xmm0, xmm8
+ LONG $0x380f4166; WORD $0xd015 // blendvpd xmm2, xmm8, xmm0
+ LONG $0xca700f66; BYTE $0x4e // pshufd xmm1, xmm2, 78
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0x15380f66; BYTE $0xca // blendvpd xmm1, xmm2, xmm0
+ LONG $0x7e0f4966; BYTE $0xc8 // movq r8, xmm1
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB6_13
+
+LBB6_4:
+ WORD $0x8948; BYTE $0xf0 // mov rax, rsi
+
+LBB6_5:
+ LONG $0xdf348b4a // mov rsi, qword [rdi + 8*r11]
+ WORD $0x3949; BYTE $0xf0 // cmp r8, rsi
+ LONG $0xc64f0f4c // cmovg r8, rsi
+ WORD $0x3948; BYTE $0xf0 // cmp rax, rsi
+ LONG $0xf04d0f48 // cmovge rsi, rax
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x8948; BYTE $0xf0 // mov rax, rsi
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB6_5
+
+LBB6_13:
+ WORD $0x8948; BYTE $0x31 // mov qword [rcx], rsi
+ WORD $0x894c; BYTE $0x02 // mov qword [rdx], r8
+ RET
+
+LBB6_7:
+ LONG $0x5d280f66; BYTE $0x00 // movapd xmm3, oword 0[rbp] /* [rip + .LCPI6_0] */
+ LONG $0x6f0f4466; WORD $0x1045 // movdqa xmm8, oword 16[rbp] /* [rip + .LCPI6_1] */
+ WORD $0xc031 // xor eax, eax
+ LONG $0x6f0f4166; BYTE $0xd0 // movdqa xmm2, xmm8
+ LONG $0xe3280f66 // movapd xmm4, xmm3
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB6_11
+ JMP LBB6_12
+
+DATA LCDATA5<>+0x000(SB)/8, $0x8000000000000000
+DATA LCDATA5<>+0x008(SB)/8, $0x8000000000000000
+GLOBL LCDATA5<>(SB), 8, $16
+
+TEXT ·_uint64_max_min_sse4(SB), $0-32
+
+ MOVQ values+0(FP), DI
+ MOVQ length+8(FP), SI
+ MOVQ minout+16(FP), DX
+ MOVQ maxout+24(FP), CX
+ LEAQ LCDATA5<>(SB), BP
+
+ WORD $0xf685 // test esi, esi
+ JLE LBB7_1
+ WORD $0x8941; BYTE $0xf1 // mov r9d, esi
+ WORD $0xfe83; BYTE $0x03 // cmp esi, 3
+ JA LBB7_6
+ LONG $0xffc0c749; WORD $0xffff; BYTE $0xff // mov r8, -1
+ WORD $0x3145; BYTE $0xdb // xor r11d, r11d
+ WORD $0xc031 // xor eax, eax
+ JMP LBB7_4
+
+LBB7_1:
+ LONG $0xffc0c749; WORD $0xffff; BYTE $0xff // mov r8, -1
+ WORD $0xc031 // xor eax, eax
+ JMP LBB7_13
+
+LBB7_6:
+ WORD $0x8945; BYTE $0xcb // mov r11d, r9d
+ LONG $0xfce38341 // and r11d, -4
+ LONG $0xfc438d49 // lea rax, [r11 - 4]
+ WORD $0x8949; BYTE $0xc0 // mov r8, rax
+ LONG $0x02e8c149 // shr r8, 2
+ LONG $0x01c08349 // add r8, 1
+ WORD $0x8548; BYTE $0xc0 // test rax, rax
+ JE LBB7_7
+ WORD $0x894d; BYTE $0xc2 // mov r10, r8
+ LONG $0xfee28349 // and r10, -2
+ WORD $0xf749; BYTE $0xda // neg r10
+ LONG $0xef0f4566; BYTE $0xc9 // pxor xmm9, xmm9
+ LONG $0x760f4566; BYTE $0xd2 // pcmpeqd xmm10, xmm10
+ WORD $0xc031 // xor eax, eax
+ LONG $0x6f0f4466; WORD $0x0045 // movdqa xmm8, oword 0[rbp] /* [rip + .LCPI7_0] */
+ LONG $0x760f4566; BYTE $0xdb // pcmpeqd xmm11, xmm11
+ LONG $0xef0f4566; BYTE $0xe4 // pxor xmm12, xmm12
+
+LBB7_9:
+ LONG $0x6f0f4166; BYTE $0xd2 // movdqa xmm2, xmm10
+ LONG $0xef0f4166; BYTE $0xd0 // pxor xmm2, xmm8
+ LONG $0x246f0ff3; BYTE $0xc7 // movdqu xmm4, oword [rdi + 8*rax]
+ LONG $0x6c6f0ff3; WORD $0x10c7 // movdqu xmm5, oword [rdi + 8*rax + 16]
+ LONG $0x6f0f44f3; WORD $0xc76c; BYTE $0x20 // movdqu xmm13, oword [rdi + 8*rax + 32]
+ LONG $0xc46f0f66 // movdqa xmm0, xmm4
+ LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8
+ LONG $0x6f0f4166; BYTE $0xc9 // movdqa xmm1, xmm9
+ LONG $0xef0f4166; BYTE $0xc8 // pxor xmm1, xmm8
+ LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0xdc6f0f66 // movdqa xmm3, xmm4
+ LONG $0x380f4166; WORD $0xda15 // blendvpd xmm3, xmm10, xmm0
+ LONG $0x746f0ff3; WORD $0x30c7 // movdqu xmm6, oword [rdi + 8*rax + 48]
+ LONG $0x6f0f4166; BYTE $0xfb // movdqa xmm7, xmm11
+ LONG $0xef0f4166; BYTE $0xf8 // pxor xmm7, xmm8
+ LONG $0xc56f0f66 // movdqa xmm0, xmm5
+ LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8
+ LONG $0x6f0f4166; BYTE $0xd4 // movdqa xmm2, xmm12
+ LONG $0xef0f4166; BYTE $0xd0 // pxor xmm2, xmm8
+ LONG $0x37380f66; BYTE $0xd0 // pcmpgtq xmm2, xmm0
+ LONG $0x37380f66; BYTE $0xc7 // pcmpgtq xmm0, xmm7
+ LONG $0xfd6f0f66 // movdqa xmm7, xmm5
+ LONG $0x380f4166; WORD $0xfb15 // blendvpd xmm7, xmm11, xmm0
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x380f4166; WORD $0xe115 // blendvpd xmm4, xmm9, xmm0
+ LONG $0xc26f0f66 // movdqa xmm0, xmm2
+ LONG $0x380f4166; WORD $0xec15 // blendvpd xmm5, xmm12, xmm0
+ LONG $0xd3280f66 // movapd xmm2, xmm3
+ LONG $0x570f4166; BYTE $0xd0 // xorpd xmm2, xmm8
+ LONG $0x6f0f4166; BYTE $0xc5 // movdqa xmm0, xmm13
+ LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8
+ LONG $0xcc280f66 // movapd xmm1, xmm4
+ LONG $0x570f4166; BYTE $0xc8 // xorpd xmm1, xmm8
+ LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0x6f0f4566; BYTE $0xd5 // movdqa xmm10, xmm13
+ LONG $0x380f4466; WORD $0xd315 // blendvpd xmm10, xmm3, xmm0
+ LONG $0xdf280f66 // movapd xmm3, xmm7
+ LONG $0x570f4166; BYTE $0xd8 // xorpd xmm3, xmm8
+ LONG $0xc66f0f66 // movdqa xmm0, xmm6
+ LONG $0xef0f4166; BYTE $0xc0 // pxor xmm0, xmm8
+ LONG $0xd5280f66 // movapd xmm2, xmm5
+ LONG $0x570f4166; BYTE $0xd0 // xorpd xmm2, xmm8
+ LONG $0x37380f66; BYTE $0xd0 // pcmpgtq xmm2, xmm0
+ LONG $0x37380f66; BYTE $0xc3 // pcmpgtq xmm0, xmm3
+ LONG $0x6f0f4466; BYTE $0xde // movdqa xmm11, xmm6
+ LONG $0x380f4466; WORD $0xdf15 // blendvpd xmm11, xmm7, xmm0
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x380f4466; WORD $0xec15 // blendvpd xmm13, xmm4, xmm0
+ LONG $0xc26f0f66 // movdqa xmm0, xmm2
+ LONG $0x15380f66; BYTE $0xf5 // blendvpd xmm6, xmm5, xmm0
+ LONG $0x08c08348 // add rax, 8
+ LONG $0x280f4566; BYTE $0xcd // movapd xmm9, xmm13
+ LONG $0x280f4466; BYTE $0xe6 // movapd xmm12, xmm6
+ LONG $0x02c28349 // add r10, 2
+ JNE LBB7_9
+ LONG $0x01c0f641 // test r8b, 1
+ JE LBB7_12
+
+LBB7_11:
+ LONG $0x24100f66; BYTE $0xc7 // movupd xmm4, oword [rdi + 8*rax]
+ LONG $0x5c100f66; WORD $0x10c7 // movupd xmm3, oword [rdi + 8*rax + 16]
+ LONG $0x6d280f66; BYTE $0x00 // movapd xmm5, oword 0[rbp] /* [rip + .LCPI7_0] */
+ LONG $0xc6280f66 // movapd xmm0, xmm6
+ LONG $0xc5570f66 // xorpd xmm0, xmm5
+ LONG $0xcb280f66 // movapd xmm1, xmm3
+ LONG $0xcd570f66 // xorpd xmm1, xmm5
+ LONG $0x37380f66; BYTE $0xc1 // pcmpgtq xmm0, xmm1
+ LONG $0xfb280f66 // movapd xmm7, xmm3
+ LONG $0x15380f66; BYTE $0xfe // blendvpd xmm7, xmm6, xmm0
+ LONG $0x280f4166; BYTE $0xc5 // movapd xmm0, xmm13
+ LONG $0xc5570f66 // xorpd xmm0, xmm5
+ LONG $0xd4280f66 // movapd xmm2, xmm4
+ LONG $0xd5570f66 // xorpd xmm2, xmm5
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0xf4280f66 // movapd xmm6, xmm4
+ LONG $0x380f4166; WORD $0xf515 // blendvpd xmm6, xmm13, xmm0
+ LONG $0x280f4166; BYTE $0xc3 // movapd xmm0, xmm11
+ LONG $0xc5570f66 // xorpd xmm0, xmm5
+ LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x380f4166; WORD $0xdb15 // blendvpd xmm3, xmm11, xmm0
+ LONG $0x570f4166; BYTE $0xea // xorpd xmm5, xmm10
+ LONG $0x37380f66; BYTE $0xd5 // pcmpgtq xmm2, xmm5
+ LONG $0xc26f0f66 // movdqa xmm0, xmm2
+ LONG $0x380f4166; WORD $0xe215 // blendvpd xmm4, xmm10, xmm0
+ LONG $0x280f4466; BYTE $0xd4 // movapd xmm10, xmm4
+ LONG $0x280f4466; BYTE $0xdb // movapd xmm11, xmm3
+ LONG $0x280f4466; BYTE $0xee // movapd xmm13, xmm6
+ LONG $0xf7280f66 // movapd xmm6, xmm7
+
+LBB7_12:
+ LONG $0x4d280f66; BYTE $0x00 // movapd xmm1, oword 0[rbp] /* [rip + .LCPI7_0] */
+ LONG $0xd6280f66 // movapd xmm2, xmm6
+ LONG $0xd1570f66 // xorpd xmm2, xmm1
+ LONG $0x280f4166; BYTE $0xc5 // movapd xmm0, xmm13
+ LONG $0xc1570f66 // xorpd xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0x380f4166; WORD $0xf515 // blendvpd xmm6, xmm13, xmm0
+ LONG $0xd6700f66; BYTE $0x4e // pshufd xmm2, xmm6, 78
+ LONG $0xc6280f66 // movapd xmm0, xmm6
+ LONG $0xc1570f66 // xorpd xmm0, xmm1
+ LONG $0xda6f0f66 // movdqa xmm3, xmm2
+ LONG $0xd9ef0f66 // pxor xmm3, xmm1
+ LONG $0x37380f66; BYTE $0xc3 // pcmpgtq xmm0, xmm3
+ LONG $0x15380f66; BYTE $0xd6 // blendvpd xmm2, xmm6, xmm0
+ LONG $0x7e0f4866; BYTE $0xd0 // movq rax, xmm2
+ LONG $0x6f0f4166; BYTE $0xd2 // movdqa xmm2, xmm10
+ LONG $0xd1ef0f66 // pxor xmm2, xmm1
+ LONG $0x6f0f4166; BYTE $0xc3 // movdqa xmm0, xmm11
+ LONG $0xc1ef0f66 // pxor xmm0, xmm1
+ LONG $0x37380f66; BYTE $0xc2 // pcmpgtq xmm0, xmm2
+ LONG $0x380f4566; WORD $0xda15 // blendvpd xmm11, xmm10, xmm0
+ LONG $0x700f4166; WORD $0x4ed3 // pshufd xmm2, xmm11, 78
+ LONG $0x6f0f4166; BYTE $0xc3 // movdqa xmm0, xmm11
+ LONG $0xc1ef0f66 // pxor xmm0, xmm1
+ LONG $0xcaef0f66 // pxor xmm1, xmm2
+ LONG $0x37380f66; BYTE $0xc8 // pcmpgtq xmm1, xmm0
+ LONG $0xc16f0f66 // movdqa xmm0, xmm1
+ LONG $0x380f4166; WORD $0xd315 // blendvpd xmm2, xmm11, xmm0
+ LONG $0x7e0f4966; BYTE $0xd0 // movq r8, xmm2
+ WORD $0x394d; BYTE $0xcb // cmp r11, r9
+ JE LBB7_13
+
+LBB7_4:
+ WORD $0x8948; BYTE $0xc6 // mov rsi, rax
+
+LBB7_5:
+ LONG $0xdf048b4a // mov rax, qword [rdi + 8*r11]
+ WORD $0x3949; BYTE $0xc0 // cmp r8, rax
+ LONG $0xc0430f4c // cmovae r8, rax
+ WORD $0x3948; BYTE $0xc6 // cmp rsi, rax
+ LONG $0xc6470f48 // cmova rax, rsi
+ LONG $0x01c38349 // add r11, 1
+ WORD $0x8948; BYTE $0xc6 // mov rsi, rax
+ WORD $0x394d; BYTE $0xd9 // cmp r9, r11
+ JNE LBB7_5
+
+LBB7_13:
+ WORD $0x8948; BYTE $0x01 // mov qword [rcx], rax
+ WORD $0x894c; BYTE $0x02 // mov qword [rdx], r8
+ RET
+
+LBB7_7:
+ LONG $0x570f4566; BYTE $0xed // xorpd xmm13, xmm13
+ LONG $0x760f4566; BYTE $0xd2 // pcmpeqd xmm10, xmm10
+ WORD $0xc031 // xor eax, eax
+ LONG $0x760f4566; BYTE $0xdb // pcmpeqd xmm11, xmm11
+ LONG $0xf6570f66 // xorpd xmm6, xmm6
+ LONG $0x01c0f641 // test r8b, 1
+ JNE LBB7_11
+ JMP LBB7_12
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go
new file mode 100644
index 000000000..1666df129
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go
@@ -0,0 +1,407 @@
+// Code generated by transpose_ints.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+// when we upgrade to support go1.18, this can be massively simplified by using
+// Go Generics, but since we aren't supporting go1.18 yet, I didn't want to use
+// them here so we can maintain the backwards compatibility.
+
+func transposeInt8Int8(src []int8, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeInt8Uint8(src []int8, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeInt8Int16(src []int8, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeInt8Uint16(src []int8, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeInt8Int32(src []int8, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeInt8Uint32(src []int8, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeInt8Int64(src []int8, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeInt8Uint64(src []int8, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeUint8Int8(src []uint8, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeUint8Uint8(src []uint8, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeUint8Int16(src []uint8, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeUint8Uint16(src []uint8, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeUint8Int32(src []uint8, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeUint8Uint32(src []uint8, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeUint8Int64(src []uint8, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeUint8Uint64(src []uint8, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeInt16Int8(src []int16, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeInt16Uint8(src []int16, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeInt16Int16(src []int16, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeInt16Uint16(src []int16, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeInt16Int32(src []int16, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeInt16Uint32(src []int16, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeInt16Int64(src []int16, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeInt16Uint64(src []int16, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeUint16Int8(src []uint16, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeUint16Uint8(src []uint16, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeUint16Int16(src []uint16, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeUint16Uint16(src []uint16, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeUint16Int32(src []uint16, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeUint16Uint32(src []uint16, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeUint16Int64(src []uint16, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeUint16Uint64(src []uint16, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeInt32Int8(src []int32, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeInt32Uint8(src []int32, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeInt32Int16(src []int32, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeInt32Uint16(src []int32, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeInt32Int32(src []int32, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeInt32Uint32(src []int32, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeInt32Int64(src []int32, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeInt32Uint64(src []int32, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeUint32Int8(src []uint32, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeUint32Uint8(src []uint32, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeUint32Int16(src []uint32, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeUint32Uint16(src []uint32, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeUint32Int32(src []uint32, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeUint32Uint32(src []uint32, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeUint32Int64(src []uint32, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeUint32Uint64(src []uint32, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeInt64Int8(src []int64, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeInt64Uint8(src []int64, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeInt64Int16(src []int64, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeInt64Uint16(src []int64, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeInt64Int32(src []int64, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeInt64Uint32(src []int64, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeInt64Int64(src []int64, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeInt64Uint64(src []int64, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
+
+func transposeUint64Int8(src []uint64, dest []int8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int8(transposeMap[s])
+ }
+}
+
+func transposeUint64Uint8(src []uint64, dest []uint8, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint8(transposeMap[s])
+ }
+}
+
+func transposeUint64Int16(src []uint64, dest []int16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int16(transposeMap[s])
+ }
+}
+
+func transposeUint64Uint16(src []uint64, dest []uint16, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint16(transposeMap[s])
+ }
+}
+
+func transposeUint64Int32(src []uint64, dest []int32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int32(transposeMap[s])
+ }
+}
+
+func transposeUint64Uint32(src []uint64, dest []uint32, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint32(transposeMap[s])
+ }
+}
+
+func transposeUint64Int64(src []uint64, dest []int64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = int64(transposeMap[s])
+ }
+}
+
+func transposeUint64Uint64(src []uint64, dest []uint64, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = uint64(transposeMap[s])
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl
new file mode 100644
index 000000000..680ae1ee7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl
@@ -0,0 +1,34 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type }}
+{{ $srcName := .Name }}
+{{ range $typelist }}
+{{ $dest := .Type }}
+{{ $destName := .Name }}
+
+func transpose{{ $srcName }}{{ $destName }}(src []{{$src}}, dest []{{$dest}}, transposeMap []int32) {
+ for i, s := range src {
+ dest[i] = {{ $dest }}(transposeMap[s])
+ }
+}
+
+{{ end }}
+{{ end }}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata
new file mode 100644
index 000000000..72eaf300c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata
@@ -0,0 +1,34 @@
+[
+ {
+ "Name": "Int8",
+ "Type": "int8"
+ },
+ {
+ "Name": "Uint8",
+ "Type": "uint8"
+ },
+ {
+ "Name": "Int16",
+ "Type": "int16"
+ },
+ {
+ "Name": "Uint16",
+ "Type": "uint16"
+ },
+ {
+ "Name": "Int32",
+ "Type": "int32"
+ },
+ {
+ "Name": "Uint32",
+ "Type": "uint32"
+ },
+ {
+ "Name": "Int64",
+ "Type": "int64"
+ },
+ {
+ "Name": "Uint64",
+ "Type": "uint64"
+ }
+]
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go
new file mode 100644
index 000000000..d4433d368
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go
@@ -0,0 +1,325 @@
+// Code generated by transpose_ints_amd64.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import (
+ "golang.org/x/sys/cpu"
+)
+
+var (
+ TransposeInt8Int8 func([]int8, []int8, []int32)
+ TransposeInt8Uint8 func([]int8, []uint8, []int32)
+ TransposeInt8Int16 func([]int8, []int16, []int32)
+ TransposeInt8Uint16 func([]int8, []uint16, []int32)
+ TransposeInt8Int32 func([]int8, []int32, []int32)
+ TransposeInt8Uint32 func([]int8, []uint32, []int32)
+ TransposeInt8Int64 func([]int8, []int64, []int32)
+ TransposeInt8Uint64 func([]int8, []uint64, []int32)
+
+ TransposeUint8Int8 func([]uint8, []int8, []int32)
+ TransposeUint8Uint8 func([]uint8, []uint8, []int32)
+ TransposeUint8Int16 func([]uint8, []int16, []int32)
+ TransposeUint8Uint16 func([]uint8, []uint16, []int32)
+ TransposeUint8Int32 func([]uint8, []int32, []int32)
+ TransposeUint8Uint32 func([]uint8, []uint32, []int32)
+ TransposeUint8Int64 func([]uint8, []int64, []int32)
+ TransposeUint8Uint64 func([]uint8, []uint64, []int32)
+
+ TransposeInt16Int8 func([]int16, []int8, []int32)
+ TransposeInt16Uint8 func([]int16, []uint8, []int32)
+ TransposeInt16Int16 func([]int16, []int16, []int32)
+ TransposeInt16Uint16 func([]int16, []uint16, []int32)
+ TransposeInt16Int32 func([]int16, []int32, []int32)
+ TransposeInt16Uint32 func([]int16, []uint32, []int32)
+ TransposeInt16Int64 func([]int16, []int64, []int32)
+ TransposeInt16Uint64 func([]int16, []uint64, []int32)
+
+ TransposeUint16Int8 func([]uint16, []int8, []int32)
+ TransposeUint16Uint8 func([]uint16, []uint8, []int32)
+ TransposeUint16Int16 func([]uint16, []int16, []int32)
+ TransposeUint16Uint16 func([]uint16, []uint16, []int32)
+ TransposeUint16Int32 func([]uint16, []int32, []int32)
+ TransposeUint16Uint32 func([]uint16, []uint32, []int32)
+ TransposeUint16Int64 func([]uint16, []int64, []int32)
+ TransposeUint16Uint64 func([]uint16, []uint64, []int32)
+
+ TransposeInt32Int8 func([]int32, []int8, []int32)
+ TransposeInt32Uint8 func([]int32, []uint8, []int32)
+ TransposeInt32Int16 func([]int32, []int16, []int32)
+ TransposeInt32Uint16 func([]int32, []uint16, []int32)
+ TransposeInt32Int32 func([]int32, []int32, []int32)
+ TransposeInt32Uint32 func([]int32, []uint32, []int32)
+ TransposeInt32Int64 func([]int32, []int64, []int32)
+ TransposeInt32Uint64 func([]int32, []uint64, []int32)
+
+ TransposeUint32Int8 func([]uint32, []int8, []int32)
+ TransposeUint32Uint8 func([]uint32, []uint8, []int32)
+ TransposeUint32Int16 func([]uint32, []int16, []int32)
+ TransposeUint32Uint16 func([]uint32, []uint16, []int32)
+ TransposeUint32Int32 func([]uint32, []int32, []int32)
+ TransposeUint32Uint32 func([]uint32, []uint32, []int32)
+ TransposeUint32Int64 func([]uint32, []int64, []int32)
+ TransposeUint32Uint64 func([]uint32, []uint64, []int32)
+
+ TransposeInt64Int8 func([]int64, []int8, []int32)
+ TransposeInt64Uint8 func([]int64, []uint8, []int32)
+ TransposeInt64Int16 func([]int64, []int16, []int32)
+ TransposeInt64Uint16 func([]int64, []uint16, []int32)
+ TransposeInt64Int32 func([]int64, []int32, []int32)
+ TransposeInt64Uint32 func([]int64, []uint32, []int32)
+ TransposeInt64Int64 func([]int64, []int64, []int32)
+ TransposeInt64Uint64 func([]int64, []uint64, []int32)
+
+ TransposeUint64Int8 func([]uint64, []int8, []int32)
+ TransposeUint64Uint8 func([]uint64, []uint8, []int32)
+ TransposeUint64Int16 func([]uint64, []int16, []int32)
+ TransposeUint64Uint16 func([]uint64, []uint16, []int32)
+ TransposeUint64Int32 func([]uint64, []int32, []int32)
+ TransposeUint64Uint32 func([]uint64, []uint32, []int32)
+ TransposeUint64Int64 func([]uint64, []int64, []int32)
+ TransposeUint64Uint64 func([]uint64, []uint64, []int32)
+)
+
+func init() {
+ if cpu.X86.HasAVX2 {
+
+ TransposeInt8Int8 = transposeInt8Int8avx2
+ TransposeInt8Uint8 = transposeInt8Uint8avx2
+ TransposeInt8Int16 = transposeInt8Int16avx2
+ TransposeInt8Uint16 = transposeInt8Uint16avx2
+ TransposeInt8Int32 = transposeInt8Int32avx2
+ TransposeInt8Uint32 = transposeInt8Uint32avx2
+ TransposeInt8Int64 = transposeInt8Int64avx2
+ TransposeInt8Uint64 = transposeInt8Uint64avx2
+
+ TransposeUint8Int8 = transposeUint8Int8avx2
+ TransposeUint8Uint8 = transposeUint8Uint8avx2
+ TransposeUint8Int16 = transposeUint8Int16avx2
+ TransposeUint8Uint16 = transposeUint8Uint16avx2
+ TransposeUint8Int32 = transposeUint8Int32avx2
+ TransposeUint8Uint32 = transposeUint8Uint32avx2
+ TransposeUint8Int64 = transposeUint8Int64avx2
+ TransposeUint8Uint64 = transposeUint8Uint64avx2
+
+ TransposeInt16Int8 = transposeInt16Int8avx2
+ TransposeInt16Uint8 = transposeInt16Uint8avx2
+ TransposeInt16Int16 = transposeInt16Int16avx2
+ TransposeInt16Uint16 = transposeInt16Uint16avx2
+ TransposeInt16Int32 = transposeInt16Int32avx2
+ TransposeInt16Uint32 = transposeInt16Uint32avx2
+ TransposeInt16Int64 = transposeInt16Int64avx2
+ TransposeInt16Uint64 = transposeInt16Uint64avx2
+
+ TransposeUint16Int8 = transposeUint16Int8avx2
+ TransposeUint16Uint8 = transposeUint16Uint8avx2
+ TransposeUint16Int16 = transposeUint16Int16avx2
+ TransposeUint16Uint16 = transposeUint16Uint16avx2
+ TransposeUint16Int32 = transposeUint16Int32avx2
+ TransposeUint16Uint32 = transposeUint16Uint32avx2
+ TransposeUint16Int64 = transposeUint16Int64avx2
+ TransposeUint16Uint64 = transposeUint16Uint64avx2
+
+ TransposeInt32Int8 = transposeInt32Int8avx2
+ TransposeInt32Uint8 = transposeInt32Uint8avx2
+ TransposeInt32Int16 = transposeInt32Int16avx2
+ TransposeInt32Uint16 = transposeInt32Uint16avx2
+ TransposeInt32Int32 = transposeInt32Int32avx2
+ TransposeInt32Uint32 = transposeInt32Uint32avx2
+ TransposeInt32Int64 = transposeInt32Int64avx2
+ TransposeInt32Uint64 = transposeInt32Uint64avx2
+
+ TransposeUint32Int8 = transposeUint32Int8avx2
+ TransposeUint32Uint8 = transposeUint32Uint8avx2
+ TransposeUint32Int16 = transposeUint32Int16avx2
+ TransposeUint32Uint16 = transposeUint32Uint16avx2
+ TransposeUint32Int32 = transposeUint32Int32avx2
+ TransposeUint32Uint32 = transposeUint32Uint32avx2
+ TransposeUint32Int64 = transposeUint32Int64avx2
+ TransposeUint32Uint64 = transposeUint32Uint64avx2
+
+ TransposeInt64Int8 = transposeInt64Int8avx2
+ TransposeInt64Uint8 = transposeInt64Uint8avx2
+ TransposeInt64Int16 = transposeInt64Int16avx2
+ TransposeInt64Uint16 = transposeInt64Uint16avx2
+ TransposeInt64Int32 = transposeInt64Int32avx2
+ TransposeInt64Uint32 = transposeInt64Uint32avx2
+ TransposeInt64Int64 = transposeInt64Int64avx2
+ TransposeInt64Uint64 = transposeInt64Uint64avx2
+
+ TransposeUint64Int8 = transposeUint64Int8avx2
+ TransposeUint64Uint8 = transposeUint64Uint8avx2
+ TransposeUint64Int16 = transposeUint64Int16avx2
+ TransposeUint64Uint16 = transposeUint64Uint16avx2
+ TransposeUint64Int32 = transposeUint64Int32avx2
+ TransposeUint64Uint32 = transposeUint64Uint32avx2
+ TransposeUint64Int64 = transposeUint64Int64avx2
+ TransposeUint64Uint64 = transposeUint64Uint64avx2
+
+ } else if cpu.X86.HasSSE42 {
+
+ TransposeInt8Int8 = transposeInt8Int8sse4
+ TransposeInt8Uint8 = transposeInt8Uint8sse4
+ TransposeInt8Int16 = transposeInt8Int16sse4
+ TransposeInt8Uint16 = transposeInt8Uint16sse4
+ TransposeInt8Int32 = transposeInt8Int32sse4
+ TransposeInt8Uint32 = transposeInt8Uint32sse4
+ TransposeInt8Int64 = transposeInt8Int64sse4
+ TransposeInt8Uint64 = transposeInt8Uint64sse4
+
+ TransposeUint8Int8 = transposeUint8Int8sse4
+ TransposeUint8Uint8 = transposeUint8Uint8sse4
+ TransposeUint8Int16 = transposeUint8Int16sse4
+ TransposeUint8Uint16 = transposeUint8Uint16sse4
+ TransposeUint8Int32 = transposeUint8Int32sse4
+ TransposeUint8Uint32 = transposeUint8Uint32sse4
+ TransposeUint8Int64 = transposeUint8Int64sse4
+ TransposeUint8Uint64 = transposeUint8Uint64sse4
+
+ TransposeInt16Int8 = transposeInt16Int8sse4
+ TransposeInt16Uint8 = transposeInt16Uint8sse4
+ TransposeInt16Int16 = transposeInt16Int16sse4
+ TransposeInt16Uint16 = transposeInt16Uint16sse4
+ TransposeInt16Int32 = transposeInt16Int32sse4
+ TransposeInt16Uint32 = transposeInt16Uint32sse4
+ TransposeInt16Int64 = transposeInt16Int64sse4
+ TransposeInt16Uint64 = transposeInt16Uint64sse4
+
+ TransposeUint16Int8 = transposeUint16Int8sse4
+ TransposeUint16Uint8 = transposeUint16Uint8sse4
+ TransposeUint16Int16 = transposeUint16Int16sse4
+ TransposeUint16Uint16 = transposeUint16Uint16sse4
+ TransposeUint16Int32 = transposeUint16Int32sse4
+ TransposeUint16Uint32 = transposeUint16Uint32sse4
+ TransposeUint16Int64 = transposeUint16Int64sse4
+ TransposeUint16Uint64 = transposeUint16Uint64sse4
+
+ TransposeInt32Int8 = transposeInt32Int8sse4
+ TransposeInt32Uint8 = transposeInt32Uint8sse4
+ TransposeInt32Int16 = transposeInt32Int16sse4
+ TransposeInt32Uint16 = transposeInt32Uint16sse4
+ TransposeInt32Int32 = transposeInt32Int32sse4
+ TransposeInt32Uint32 = transposeInt32Uint32sse4
+ TransposeInt32Int64 = transposeInt32Int64sse4
+ TransposeInt32Uint64 = transposeInt32Uint64sse4
+
+ TransposeUint32Int8 = transposeUint32Int8sse4
+ TransposeUint32Uint8 = transposeUint32Uint8sse4
+ TransposeUint32Int16 = transposeUint32Int16sse4
+ TransposeUint32Uint16 = transposeUint32Uint16sse4
+ TransposeUint32Int32 = transposeUint32Int32sse4
+ TransposeUint32Uint32 = transposeUint32Uint32sse4
+ TransposeUint32Int64 = transposeUint32Int64sse4
+ TransposeUint32Uint64 = transposeUint32Uint64sse4
+
+ TransposeInt64Int8 = transposeInt64Int8sse4
+ TransposeInt64Uint8 = transposeInt64Uint8sse4
+ TransposeInt64Int16 = transposeInt64Int16sse4
+ TransposeInt64Uint16 = transposeInt64Uint16sse4
+ TransposeInt64Int32 = transposeInt64Int32sse4
+ TransposeInt64Uint32 = transposeInt64Uint32sse4
+ TransposeInt64Int64 = transposeInt64Int64sse4
+ TransposeInt64Uint64 = transposeInt64Uint64sse4
+
+ TransposeUint64Int8 = transposeUint64Int8sse4
+ TransposeUint64Uint8 = transposeUint64Uint8sse4
+ TransposeUint64Int16 = transposeUint64Int16sse4
+ TransposeUint64Uint16 = transposeUint64Uint16sse4
+ TransposeUint64Int32 = transposeUint64Int32sse4
+ TransposeUint64Uint32 = transposeUint64Uint32sse4
+ TransposeUint64Int64 = transposeUint64Int64sse4
+ TransposeUint64Uint64 = transposeUint64Uint64sse4
+
+ } else {
+
+ TransposeInt8Int8 = transposeInt8Int8
+ TransposeInt8Uint8 = transposeInt8Uint8
+ TransposeInt8Int16 = transposeInt8Int16
+ TransposeInt8Uint16 = transposeInt8Uint16
+ TransposeInt8Int32 = transposeInt8Int32
+ TransposeInt8Uint32 = transposeInt8Uint32
+ TransposeInt8Int64 = transposeInt8Int64
+ TransposeInt8Uint64 = transposeInt8Uint64
+
+ TransposeUint8Int8 = transposeUint8Int8
+ TransposeUint8Uint8 = transposeUint8Uint8
+ TransposeUint8Int16 = transposeUint8Int16
+ TransposeUint8Uint16 = transposeUint8Uint16
+ TransposeUint8Int32 = transposeUint8Int32
+ TransposeUint8Uint32 = transposeUint8Uint32
+ TransposeUint8Int64 = transposeUint8Int64
+ TransposeUint8Uint64 = transposeUint8Uint64
+
+ TransposeInt16Int8 = transposeInt16Int8
+ TransposeInt16Uint8 = transposeInt16Uint8
+ TransposeInt16Int16 = transposeInt16Int16
+ TransposeInt16Uint16 = transposeInt16Uint16
+ TransposeInt16Int32 = transposeInt16Int32
+ TransposeInt16Uint32 = transposeInt16Uint32
+ TransposeInt16Int64 = transposeInt16Int64
+ TransposeInt16Uint64 = transposeInt16Uint64
+
+ TransposeUint16Int8 = transposeUint16Int8
+ TransposeUint16Uint8 = transposeUint16Uint8
+ TransposeUint16Int16 = transposeUint16Int16
+ TransposeUint16Uint16 = transposeUint16Uint16
+ TransposeUint16Int32 = transposeUint16Int32
+ TransposeUint16Uint32 = transposeUint16Uint32
+ TransposeUint16Int64 = transposeUint16Int64
+ TransposeUint16Uint64 = transposeUint16Uint64
+
+ TransposeInt32Int8 = transposeInt32Int8
+ TransposeInt32Uint8 = transposeInt32Uint8
+ TransposeInt32Int16 = transposeInt32Int16
+ TransposeInt32Uint16 = transposeInt32Uint16
+ TransposeInt32Int32 = transposeInt32Int32
+ TransposeInt32Uint32 = transposeInt32Uint32
+ TransposeInt32Int64 = transposeInt32Int64
+ TransposeInt32Uint64 = transposeInt32Uint64
+
+ TransposeUint32Int8 = transposeUint32Int8
+ TransposeUint32Uint8 = transposeUint32Uint8
+ TransposeUint32Int16 = transposeUint32Int16
+ TransposeUint32Uint16 = transposeUint32Uint16
+ TransposeUint32Int32 = transposeUint32Int32
+ TransposeUint32Uint32 = transposeUint32Uint32
+ TransposeUint32Int64 = transposeUint32Int64
+ TransposeUint32Uint64 = transposeUint32Uint64
+
+ TransposeInt64Int8 = transposeInt64Int8
+ TransposeInt64Uint8 = transposeInt64Uint8
+ TransposeInt64Int16 = transposeInt64Int16
+ TransposeInt64Uint16 = transposeInt64Uint16
+ TransposeInt64Int32 = transposeInt64Int32
+ TransposeInt64Uint32 = transposeInt64Uint32
+ TransposeInt64Int64 = transposeInt64Int64
+ TransposeInt64Uint64 = transposeInt64Uint64
+
+ TransposeUint64Int8 = transposeUint64Int8
+ TransposeUint64Uint8 = transposeUint64Uint8
+ TransposeUint64Int16 = transposeUint64Int16
+ TransposeUint64Uint16 = transposeUint64Uint16
+ TransposeUint64Int32 = transposeUint64Int32
+ TransposeUint64Uint32 = transposeUint64Uint32
+ TransposeUint64Int64 = transposeUint64Int64
+ TransposeUint64Uint64 = transposeUint64Uint64
+
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl
new file mode 100644
index 000000000..eac0208e5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl
@@ -0,0 +1,75 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package utils
+
+import (
+ "golang.org/x/sys/cpu"
+)
+
+var (
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type -}}
+{{ $srcName := .Name -}}
+{{ range $typelist -}}
+{{ $dest := .Type -}}
+{{ $destName := .Name -}}
+ Transpose{{$srcName}}{{$destName}} func([]{{$src}}, []{{$dest}}, []int32)
+{{end}}
+{{end}}
+)
+
+
+func init() {
+ if cpu.X86.HasAVX2 {
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type -}}
+{{ $srcName := .Name -}}
+{{ range $typelist -}}
+{{ $dest := .Type -}}
+{{ $destName := .Name -}}
+ Transpose{{$srcName}}{{$destName}} = transpose{{ $srcName }}{{ $destName }}avx2
+{{end}}
+{{end}}
+ } else if cpu.X86.HasSSE42 {
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type -}}
+{{ $srcName := .Name -}}
+{{ range $typelist -}}
+{{ $dest := .Type -}}
+{{ $destName := .Name -}}
+ Transpose{{$srcName}}{{$destName}} = transpose{{ $srcName }}{{ $destName }}sse4
+{{end}}
+{{end}}
+ } else {
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type -}}
+{{ $srcName := .Name -}}
+{{ range $typelist -}}
+{{ $dest := .Type -}}
+{{ $destName := .Name -}}
+ Transpose{{$srcName}}{{$destName}} = transpose{{ $srcName }}{{ $destName }}
+{{end}}
+{{end}}
+ }
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go
new file mode 100644
index 000000000..cc957cdaa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go
@@ -0,0 +1,96 @@
+// Code generated by transpose_ints_s390x.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+var (
+ TransposeInt8Int8 = transposeInt8Int8
+ TransposeInt8Uint8 = transposeInt8Uint8
+ TransposeInt8Int16 = transposeInt8Int16
+ TransposeInt8Uint16 = transposeInt8Uint16
+ TransposeInt8Int32 = transposeInt8Int32
+ TransposeInt8Uint32 = transposeInt8Uint32
+ TransposeInt8Int64 = transposeInt8Int64
+ TransposeInt8Uint64 = transposeInt8Uint64
+
+ TransposeUint8Int8 = transposeUint8Int8
+ TransposeUint8Uint8 = transposeUint8Uint8
+ TransposeUint8Int16 = transposeUint8Int16
+ TransposeUint8Uint16 = transposeUint8Uint16
+ TransposeUint8Int32 = transposeUint8Int32
+ TransposeUint8Uint32 = transposeUint8Uint32
+ TransposeUint8Int64 = transposeUint8Int64
+ TransposeUint8Uint64 = transposeUint8Uint64
+
+ TransposeInt16Int8 = transposeInt16Int8
+ TransposeInt16Uint8 = transposeInt16Uint8
+ TransposeInt16Int16 = transposeInt16Int16
+ TransposeInt16Uint16 = transposeInt16Uint16
+ TransposeInt16Int32 = transposeInt16Int32
+ TransposeInt16Uint32 = transposeInt16Uint32
+ TransposeInt16Int64 = transposeInt16Int64
+ TransposeInt16Uint64 = transposeInt16Uint64
+
+ TransposeUint16Int8 = transposeUint16Int8
+ TransposeUint16Uint8 = transposeUint16Uint8
+ TransposeUint16Int16 = transposeUint16Int16
+ TransposeUint16Uint16 = transposeUint16Uint16
+ TransposeUint16Int32 = transposeUint16Int32
+ TransposeUint16Uint32 = transposeUint16Uint32
+ TransposeUint16Int64 = transposeUint16Int64
+ TransposeUint16Uint64 = transposeUint16Uint64
+
+ TransposeInt32Int8 = transposeInt32Int8
+ TransposeInt32Uint8 = transposeInt32Uint8
+ TransposeInt32Int16 = transposeInt32Int16
+ TransposeInt32Uint16 = transposeInt32Uint16
+ TransposeInt32Int32 = transposeInt32Int32
+ TransposeInt32Uint32 = transposeInt32Uint32
+ TransposeInt32Int64 = transposeInt32Int64
+ TransposeInt32Uint64 = transposeInt32Uint64
+
+ TransposeUint32Int8 = transposeUint32Int8
+ TransposeUint32Uint8 = transposeUint32Uint8
+ TransposeUint32Int16 = transposeUint32Int16
+ TransposeUint32Uint16 = transposeUint32Uint16
+ TransposeUint32Int32 = transposeUint32Int32
+ TransposeUint32Uint32 = transposeUint32Uint32
+ TransposeUint32Int64 = transposeUint32Int64
+ TransposeUint32Uint64 = transposeUint32Uint64
+
+ TransposeInt64Int8 = transposeInt64Int8
+ TransposeInt64Uint8 = transposeInt64Uint8
+ TransposeInt64Int16 = transposeInt64Int16
+ TransposeInt64Uint16 = transposeInt64Uint16
+ TransposeInt64Int32 = transposeInt64Int32
+ TransposeInt64Uint32 = transposeInt64Uint32
+ TransposeInt64Int64 = transposeInt64Int64
+ TransposeInt64Uint64 = transposeInt64Uint64
+
+ TransposeUint64Int8 = transposeUint64Int8
+ TransposeUint64Uint8 = transposeUint64Uint8
+ TransposeUint64Int16 = transposeUint64Int16
+ TransposeUint64Uint16 = transposeUint64Uint16
+ TransposeUint64Int32 = transposeUint64Int32
+ TransposeUint64Uint32 = transposeUint64Uint32
+ TransposeUint64Int64 = transposeUint64Int64
+ TransposeUint64Uint64 = transposeUint64Uint64
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go
new file mode 100644
index 000000000..f1421ddf5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go
@@ -0,0 +1,473 @@
+// Code generated by transpose_ints_simd.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import (
+ "unsafe"
+)
+
+//go:noescape
+func _transpose_int8_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int8avx2(src []int8, dest []int8, transposeMap []int32) {
+ _transpose_int8_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint8avx2(src []int8, dest []uint8, transposeMap []int32) {
+ _transpose_int8_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int16avx2(src []int8, dest []int16, transposeMap []int32) {
+ _transpose_int8_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint16avx2(src []int8, dest []uint16, transposeMap []int32) {
+ _transpose_int8_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int32avx2(src []int8, dest []int32, transposeMap []int32) {
+ _transpose_int8_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint32avx2(src []int8, dest []uint32, transposeMap []int32) {
+ _transpose_int8_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int64avx2(src []int8, dest []int64, transposeMap []int32) {
+ _transpose_int8_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint64avx2(src []int8, dest []uint64, transposeMap []int32) {
+ _transpose_int8_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int8avx2(src []uint8, dest []int8, transposeMap []int32) {
+ _transpose_uint8_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint8avx2(src []uint8, dest []uint8, transposeMap []int32) {
+ _transpose_uint8_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int16avx2(src []uint8, dest []int16, transposeMap []int32) {
+ _transpose_uint8_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint16avx2(src []uint8, dest []uint16, transposeMap []int32) {
+ _transpose_uint8_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int32avx2(src []uint8, dest []int32, transposeMap []int32) {
+ _transpose_uint8_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint32avx2(src []uint8, dest []uint32, transposeMap []int32) {
+ _transpose_uint8_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int64avx2(src []uint8, dest []int64, transposeMap []int32) {
+ _transpose_uint8_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint64avx2(src []uint8, dest []uint64, transposeMap []int32) {
+ _transpose_uint8_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int8avx2(src []int16, dest []int8, transposeMap []int32) {
+ _transpose_int16_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint8avx2(src []int16, dest []uint8, transposeMap []int32) {
+ _transpose_int16_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int16avx2(src []int16, dest []int16, transposeMap []int32) {
+ _transpose_int16_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint16avx2(src []int16, dest []uint16, transposeMap []int32) {
+ _transpose_int16_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int32avx2(src []int16, dest []int32, transposeMap []int32) {
+ _transpose_int16_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint32avx2(src []int16, dest []uint32, transposeMap []int32) {
+ _transpose_int16_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int64avx2(src []int16, dest []int64, transposeMap []int32) {
+ _transpose_int16_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint64avx2(src []int16, dest []uint64, transposeMap []int32) {
+ _transpose_int16_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int8avx2(src []uint16, dest []int8, transposeMap []int32) {
+ _transpose_uint16_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint8avx2(src []uint16, dest []uint8, transposeMap []int32) {
+ _transpose_uint16_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int16avx2(src []uint16, dest []int16, transposeMap []int32) {
+ _transpose_uint16_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint16avx2(src []uint16, dest []uint16, transposeMap []int32) {
+ _transpose_uint16_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int32avx2(src []uint16, dest []int32, transposeMap []int32) {
+ _transpose_uint16_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint32avx2(src []uint16, dest []uint32, transposeMap []int32) {
+ _transpose_uint16_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int64avx2(src []uint16, dest []int64, transposeMap []int32) {
+ _transpose_uint16_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint64avx2(src []uint16, dest []uint64, transposeMap []int32) {
+ _transpose_uint16_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int8avx2(src []int32, dest []int8, transposeMap []int32) {
+ _transpose_int32_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint8avx2(src []int32, dest []uint8, transposeMap []int32) {
+ _transpose_int32_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int16avx2(src []int32, dest []int16, transposeMap []int32) {
+ _transpose_int32_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint16avx2(src []int32, dest []uint16, transposeMap []int32) {
+ _transpose_int32_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int32avx2(src []int32, dest []int32, transposeMap []int32) {
+ _transpose_int32_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint32avx2(src []int32, dest []uint32, transposeMap []int32) {
+ _transpose_int32_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int64avx2(src []int32, dest []int64, transposeMap []int32) {
+ _transpose_int32_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint64avx2(src []int32, dest []uint64, transposeMap []int32) {
+ _transpose_int32_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int8avx2(src []uint32, dest []int8, transposeMap []int32) {
+ _transpose_uint32_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint8avx2(src []uint32, dest []uint8, transposeMap []int32) {
+ _transpose_uint32_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int16avx2(src []uint32, dest []int16, transposeMap []int32) {
+ _transpose_uint32_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint16avx2(src []uint32, dest []uint16, transposeMap []int32) {
+ _transpose_uint32_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int32avx2(src []uint32, dest []int32, transposeMap []int32) {
+ _transpose_uint32_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint32avx2(src []uint32, dest []uint32, transposeMap []int32) {
+ _transpose_uint32_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int64avx2(src []uint32, dest []int64, transposeMap []int32) {
+ _transpose_uint32_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint64avx2(src []uint32, dest []uint64, transposeMap []int32) {
+ _transpose_uint32_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int8avx2(src []int64, dest []int8, transposeMap []int32) {
+ _transpose_int64_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint8avx2(src []int64, dest []uint8, transposeMap []int32) {
+ _transpose_int64_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int16avx2(src []int64, dest []int16, transposeMap []int32) {
+ _transpose_int64_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint16avx2(src []int64, dest []uint16, transposeMap []int32) {
+ _transpose_int64_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int32avx2(src []int64, dest []int32, transposeMap []int32) {
+ _transpose_int64_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint32avx2(src []int64, dest []uint32, transposeMap []int32) {
+ _transpose_int64_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int64avx2(src []int64, dest []int64, transposeMap []int32) {
+ _transpose_int64_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint64avx2(src []int64, dest []uint64, transposeMap []int32) {
+ _transpose_int64_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int8avx2(src []uint64, dest []int8, transposeMap []int32) {
+ _transpose_uint64_int8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint8_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint8avx2(src []uint64, dest []uint8, transposeMap []int32) {
+ _transpose_uint64_uint8_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int16avx2(src []uint64, dest []int16, transposeMap []int32) {
+ _transpose_uint64_int16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint16_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint16avx2(src []uint64, dest []uint16, transposeMap []int32) {
+ _transpose_uint64_uint16_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int32avx2(src []uint64, dest []int32, transposeMap []int32) {
+ _transpose_uint64_int32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint32_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint32avx2(src []uint64, dest []uint32, transposeMap []int32) {
+ _transpose_uint64_uint32_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int64avx2(src []uint64, dest []int64, transposeMap []int32) {
+ _transpose_uint64_int64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint64_avx2(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint64avx2(src []uint64, dest []uint64, transposeMap []int32) {
+ _transpose_uint64_uint64_avx2(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s
new file mode 100644
index 000000000..fbcc101eb
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s
@@ -0,0 +1,3074 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_transpose_uint8_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB0_1
+
+LBB0_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB0_5
+
+LBB0_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB0_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB0_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB0_3
+
+LBB0_4:
+ RET
+
+TEXT ·_transpose_int8_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB1_1
+
+LBB1_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB1_5
+
+LBB1_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB1_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB1_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB1_3
+
+LBB1_4:
+ RET
+
+TEXT ·_transpose_uint16_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB2_1
+
+LBB2_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB2_5
+
+LBB2_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB2_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB2_3:
+ LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB2_3
+
+LBB2_4:
+ RET
+
+TEXT ·_transpose_int16_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB3_1
+
+LBB3_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB3_5
+
+LBB3_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB3_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB3_3:
+ LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB3_3
+
+LBB3_4:
+ RET
+
+TEXT ·_transpose_uint32_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB4_1
+
+LBB4_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB4_5
+
+LBB4_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB4_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB4_3:
+ LONG $0x87048b42 // mov eax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB4_3
+
+LBB4_4:
+ RET
+
+TEXT ·_transpose_int32_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB5_1
+
+LBB5_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB5_5
+
+LBB5_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB5_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB5_3:
+ LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB5_3
+
+LBB5_4:
+ RET
+
+TEXT ·_transpose_uint64_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB6_1
+
+LBB6_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB6_5
+
+LBB6_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB6_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB6_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB6_3
+
+LBB6_4:
+ RET
+
+TEXT ·_transpose_int64_uint8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB7_1
+
+LBB7_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB7_5
+
+LBB7_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB7_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB7_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB7_3
+
+LBB7_4:
+ RET
+
+TEXT ·_transpose_uint8_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB8_1
+
+LBB8_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB8_5
+
+LBB8_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB8_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB8_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB8_3
+
+LBB8_4:
+ RET
+
+TEXT ·_transpose_int8_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB9_1
+
+LBB9_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB9_5
+
+LBB9_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB9_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB9_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB9_3
+
+LBB9_4:
+ RET
+
+TEXT ·_transpose_uint16_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB10_1
+
+LBB10_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB10_5
+
+LBB10_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB10_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB10_3:
+ LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB10_3
+
+LBB10_4:
+ RET
+
+TEXT ·_transpose_int16_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB11_1
+
+LBB11_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB11_5
+
+LBB11_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB11_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB11_3:
+ LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB11_3
+
+LBB11_4:
+ RET
+
+TEXT ·_transpose_uint32_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB12_1
+
+LBB12_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB12_5
+
+LBB12_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB12_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB12_3:
+ LONG $0x87048b42 // mov eax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB12_3
+
+LBB12_4:
+ RET
+
+TEXT ·_transpose_int32_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB13_1
+
+LBB13_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB13_5
+
+LBB13_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB13_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB13_3:
+ LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB13_3
+
+LBB13_4:
+ RET
+
+TEXT ·_transpose_uint64_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB14_1
+
+LBB14_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB14_5
+
+LBB14_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB14_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB14_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB14_3
+
+LBB14_4:
+ RET
+
+TEXT ·_transpose_int64_int8_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB15_1
+
+LBB15_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB15_5
+
+LBB15_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB15_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB15_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB15_3
+
+LBB15_4:
+ RET
+
+TEXT ·_transpose_uint8_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB16_1
+
+LBB16_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB16_5
+
+LBB16_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB16_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB16_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB16_3
+
+LBB16_4:
+ RET
+
+TEXT ·_transpose_int8_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB17_1
+
+LBB17_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB17_5
+
+LBB17_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB17_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB17_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB17_3
+
+LBB17_4:
+ RET
+
+TEXT ·_transpose_uint16_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB18_1
+
+LBB18_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB18_5
+
+LBB18_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB18_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB18_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB18_3
+
+LBB18_4:
+ RET
+
+TEXT ·_transpose_int16_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB19_1
+
+LBB19_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB19_5
+
+LBB19_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB19_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB19_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB19_3
+
+LBB19_4:
+ RET
+
+TEXT ·_transpose_uint32_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB20_1
+
+LBB20_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB20_5
+
+LBB20_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB20_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB20_3:
+ LONG $0x47048b42 // mov eax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB20_3
+
+LBB20_4:
+ RET
+
+TEXT ·_transpose_int32_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB21_1
+
+LBB21_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB21_5
+
+LBB21_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB21_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB21_3:
+ LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB21_3
+
+LBB21_4:
+ RET
+
+TEXT ·_transpose_uint64_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB22_1
+
+LBB22_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB22_5
+
+LBB22_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB22_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB22_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB22_3
+
+LBB22_4:
+ RET
+
+TEXT ·_transpose_int64_uint16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB23_1
+
+LBB23_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB23_5
+
+LBB23_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB23_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB23_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB23_3
+
+LBB23_4:
+ RET
+
+TEXT ·_transpose_uint8_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB24_1
+
+LBB24_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB24_5
+
+LBB24_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB24_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB24_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB24_3
+
+LBB24_4:
+ RET
+
+TEXT ·_transpose_int8_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB25_1
+
+LBB25_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB25_5
+
+LBB25_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB25_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB25_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB25_3
+
+LBB25_4:
+ RET
+
+TEXT ·_transpose_uint16_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB26_1
+
+LBB26_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB26_5
+
+LBB26_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB26_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB26_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB26_3
+
+LBB26_4:
+ RET
+
+TEXT ·_transpose_int16_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB27_1
+
+LBB27_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB27_5
+
+LBB27_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB27_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB27_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB27_3
+
+LBB27_4:
+ RET
+
+TEXT ·_transpose_uint32_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB28_1
+
+LBB28_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB28_5
+
+LBB28_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB28_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB28_3:
+ LONG $0x47048b42 // mov eax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB28_3
+
+LBB28_4:
+ RET
+
+TEXT ·_transpose_int32_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB29_1
+
+LBB29_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB29_5
+
+LBB29_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB29_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB29_3:
+ LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB29_3
+
+LBB29_4:
+ RET
+
+TEXT ·_transpose_uint64_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB30_1
+
+LBB30_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB30_5
+
+LBB30_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB30_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB30_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB30_3
+
+LBB30_4:
+ RET
+
+TEXT ·_transpose_int64_int16_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB31_1
+
+LBB31_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB31_5
+
+LBB31_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB31_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB31_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB31_3
+
+LBB31_4:
+ RET
+
+TEXT ·_transpose_uint8_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB32_1
+
+LBB32_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB32_5
+
+LBB32_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB32_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB32_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB32_3
+
+LBB32_4:
+ RET
+
+TEXT ·_transpose_int8_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB33_1
+
+LBB33_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB33_5
+
+LBB33_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB33_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB33_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB33_3
+
+LBB33_4:
+ RET
+
+TEXT ·_transpose_uint16_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB34_1
+
+LBB34_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB34_5
+
+LBB34_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB34_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB34_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB34_3
+
+LBB34_4:
+ RET
+
+TEXT ·_transpose_int16_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB35_1
+
+LBB35_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB35_5
+
+LBB35_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB35_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB35_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB35_3
+
+LBB35_4:
+ RET
+
+TEXT ·_transpose_uint32_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB36_1
+
+LBB36_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB36_5
+
+LBB36_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB36_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB36_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB36_3
+
+LBB36_4:
+ RET
+
+TEXT ·_transpose_int32_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB37_1
+
+LBB37_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB37_5
+
+LBB37_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB37_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB37_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB37_3
+
+LBB37_4:
+ RET
+
+TEXT ·_transpose_uint64_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB38_1
+
+LBB38_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB38_5
+
+LBB38_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB38_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB38_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB38_3
+
+LBB38_4:
+ RET
+
+TEXT ·_transpose_int64_uint32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB39_1
+
+LBB39_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB39_5
+
+LBB39_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB39_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB39_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB39_3
+
+LBB39_4:
+ RET
+
+TEXT ·_transpose_uint8_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB40_1
+
+LBB40_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB40_5
+
+LBB40_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB40_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB40_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB40_3
+
+LBB40_4:
+ RET
+
+TEXT ·_transpose_int8_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB41_1
+
+LBB41_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB41_5
+
+LBB41_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB41_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB41_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB41_3
+
+LBB41_4:
+ RET
+
+TEXT ·_transpose_uint16_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB42_1
+
+LBB42_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB42_5
+
+LBB42_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB42_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB42_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB42_3
+
+LBB42_4:
+ RET
+
+TEXT ·_transpose_int16_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB43_1
+
+LBB43_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB43_5
+
+LBB43_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB43_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB43_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB43_3
+
+LBB43_4:
+ RET
+
+TEXT ·_transpose_uint32_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB44_1
+
+LBB44_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB44_5
+
+LBB44_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB44_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB44_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB44_3
+
+LBB44_4:
+ RET
+
+TEXT ·_transpose_int32_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB45_1
+
+LBB45_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB45_5
+
+LBB45_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB45_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB45_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB45_3
+
+LBB45_4:
+ RET
+
+TEXT ·_transpose_uint64_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB46_1
+
+LBB46_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB46_5
+
+LBB46_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB46_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB46_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB46_3
+
+LBB46_4:
+ RET
+
+TEXT ·_transpose_int64_int32_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB47_1
+
+LBB47_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB47_5
+
+LBB47_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB47_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB47_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB47_3
+
+LBB47_4:
+ RET
+
+TEXT ·_transpose_uint8_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB48_1
+
+LBB48_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB48_5
+
+LBB48_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB48_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB48_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB48_3
+
+LBB48_4:
+ RET
+
+TEXT ·_transpose_int8_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB49_1
+
+LBB49_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB49_5
+
+LBB49_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB49_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB49_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB49_3
+
+LBB49_4:
+ RET
+
+TEXT ·_transpose_uint16_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB50_1
+
+LBB50_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB50_5
+
+LBB50_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB50_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB50_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB50_3
+
+LBB50_4:
+ RET
+
+TEXT ·_transpose_int16_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB51_1
+
+LBB51_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB51_5
+
+LBB51_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB51_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB51_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB51_3
+
+LBB51_4:
+ RET
+
+TEXT ·_transpose_uint32_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB52_1
+
+LBB52_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB52_5
+
+LBB52_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB52_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB52_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB52_3
+
+LBB52_4:
+ RET
+
+TEXT ·_transpose_int32_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB53_1
+
+LBB53_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB53_5
+
+LBB53_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB53_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB53_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB53_3
+
+LBB53_4:
+ RET
+
+TEXT ·_transpose_uint64_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB54_1
+
+LBB54_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB54_5
+
+LBB54_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB54_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB54_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB54_3
+
+LBB54_4:
+ RET
+
+TEXT ·_transpose_int64_uint64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB55_1
+
+LBB55_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB55_5
+
+LBB55_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB55_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB55_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB55_3
+
+LBB55_4:
+ RET
+
+TEXT ·_transpose_uint8_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB56_1
+
+LBB56_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB56_5
+
+LBB56_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB56_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB56_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB56_3
+
+LBB56_4:
+ RET
+
+TEXT ·_transpose_int8_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB57_1
+
+LBB57_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB57_5
+
+LBB57_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB57_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB57_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB57_3
+
+LBB57_4:
+ RET
+
+TEXT ·_transpose_uint16_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB58_1
+
+LBB58_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB58_5
+
+LBB58_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB58_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB58_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB58_3
+
+LBB58_4:
+ RET
+
+TEXT ·_transpose_int16_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB59_1
+
+LBB59_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB59_5
+
+LBB59_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB59_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB59_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB59_3
+
+LBB59_4:
+ RET
+
+TEXT ·_transpose_uint32_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB60_1
+
+LBB60_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB60_5
+
+LBB60_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB60_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB60_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB60_3
+
+LBB60_4:
+ RET
+
+TEXT ·_transpose_int32_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB61_1
+
+LBB61_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB61_5
+
+LBB61_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB61_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB61_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB61_3
+
+LBB61_4:
+ RET
+
+TEXT ·_transpose_uint64_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB62_1
+
+LBB62_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB62_5
+
+LBB62_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB62_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB62_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB62_3
+
+LBB62_4:
+ RET
+
+TEXT ·_transpose_int64_int64_avx2(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB63_1
+
+LBB63_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB63_5
+
+LBB63_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB63_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB63_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB63_3
+
+LBB63_4:
+ RET
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go
new file mode 100644
index 000000000..cc3b0abb5
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go
@@ -0,0 +1,227 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+
+ "github.com/apache/arrow/go/v14/arrow"
+)
+
+//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata -d arch=avx2 transpose_ints_simd.go.tmpl=transpose_ints_avx2_amd64.go
+//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata -d arch=sse4 transpose_ints_simd.go.tmpl=transpose_ints_sse4_amd64.go
+//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints_s390x.go.tmpl=transpose_ints_s390x.go
+//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints_s390x.go.tmpl=transpose_ints_arm64.go
+//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints_noasm.go.tmpl=transpose_ints_noasm.go
+//go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata transpose_ints.go.tmpl=transpose_ints.go
+
+func bufToTyped(typ arrow.DataType, buf []byte, offset, length int) (interface{}, error) {
+ switch typ.ID() {
+ case arrow.INT8:
+ return arrow.Int8Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.INT16:
+ return arrow.Int16Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.INT32:
+ return arrow.Int32Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.INT64:
+ return arrow.Int64Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.UINT8:
+ return arrow.Uint8Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.UINT16:
+ return arrow.Uint16Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.UINT32:
+ return arrow.Uint32Traits.CastFromBytes(buf)[offset : offset+length], nil
+ case arrow.UINT64:
+ return arrow.Uint64Traits.CastFromBytes(buf)[offset : offset+length], nil
+ }
+ return nil, errors.New("only accepts integral types")
+}
+
+// TransposeIntsBuffers takes the data-types, byte buffers, and offsets of a source and destination
+// buffer to perform TransposeInts on with the provided mapping data.
+func TransposeIntsBuffers(inType, outType arrow.DataType, indata, outdata []byte, inOffset, outOffset int, length int, transposeMap []int32) error {
+ src, err := bufToTyped(inType, indata, inOffset, length)
+ if err != nil {
+ return err
+ }
+ dest, err := bufToTyped(outType, outdata, outOffset, length)
+ if err != nil {
+ return err
+ }
+
+ return TransposeInts(src, dest, transposeMap)
+}
+
+// TransposeInts expects two integral slices and the values they map to. Returning
+// an error if either src or dest are not an integral type.
+func TransposeInts(src, dest interface{}, mapping []int32) error {
+ switch s := src.(type) {
+ case []int8:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeInt8Int8(s, d, mapping)
+ case []int16:
+ TransposeInt8Int16(s, d, mapping)
+ case []int32:
+ TransposeInt8Int32(s, d, mapping)
+ case []int64:
+ TransposeInt8Int64(s, d, mapping)
+ case []uint8:
+ TransposeInt8Uint8(s, d, mapping)
+ case []uint16:
+ TransposeInt8Uint16(s, d, mapping)
+ case []uint32:
+ TransposeInt8Uint32(s, d, mapping)
+ case []uint64:
+ TransposeInt8Uint64(s, d, mapping)
+ }
+ case []int16:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeInt16Int8(s, d, mapping)
+ case []int16:
+ TransposeInt16Int16(s, d, mapping)
+ case []int32:
+ TransposeInt16Int32(s, d, mapping)
+ case []int64:
+ TransposeInt16Int64(s, d, mapping)
+ case []uint8:
+ TransposeInt16Uint8(s, d, mapping)
+ case []uint16:
+ TransposeInt16Uint16(s, d, mapping)
+ case []uint32:
+ TransposeInt16Uint32(s, d, mapping)
+ case []uint64:
+ TransposeInt16Uint64(s, d, mapping)
+ }
+ case []int32:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeInt32Int8(s, d, mapping)
+ case []int16:
+ TransposeInt32Int16(s, d, mapping)
+ case []int32:
+ TransposeInt32Int32(s, d, mapping)
+ case []int64:
+ TransposeInt32Int64(s, d, mapping)
+ case []uint8:
+ TransposeInt32Uint8(s, d, mapping)
+ case []uint16:
+ TransposeInt32Uint16(s, d, mapping)
+ case []uint32:
+ TransposeInt32Uint32(s, d, mapping)
+ case []uint64:
+ TransposeInt32Uint64(s, d, mapping)
+ }
+ case []int64:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeInt64Int8(s, d, mapping)
+ case []int16:
+ TransposeInt64Int16(s, d, mapping)
+ case []int32:
+ TransposeInt64Int32(s, d, mapping)
+ case []int64:
+ TransposeInt64Int64(s, d, mapping)
+ case []uint8:
+ TransposeInt64Uint8(s, d, mapping)
+ case []uint16:
+ TransposeInt64Uint16(s, d, mapping)
+ case []uint32:
+ TransposeInt64Uint32(s, d, mapping)
+ case []uint64:
+ TransposeInt64Uint64(s, d, mapping)
+ }
+ case []uint8:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeUint8Int8(s, d, mapping)
+ case []int16:
+ TransposeUint8Int16(s, d, mapping)
+ case []int32:
+ TransposeUint8Int32(s, d, mapping)
+ case []int64:
+ TransposeUint8Int64(s, d, mapping)
+ case []uint8:
+ TransposeUint8Uint8(s, d, mapping)
+ case []uint16:
+ TransposeUint8Uint16(s, d, mapping)
+ case []uint32:
+ TransposeUint8Uint32(s, d, mapping)
+ case []uint64:
+ TransposeUint8Uint64(s, d, mapping)
+ }
+ case []uint16:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeUint16Int8(s, d, mapping)
+ case []int16:
+ TransposeUint16Int16(s, d, mapping)
+ case []int32:
+ TransposeUint16Int32(s, d, mapping)
+ case []int64:
+ TransposeUint16Int64(s, d, mapping)
+ case []uint8:
+ TransposeUint16Uint8(s, d, mapping)
+ case []uint16:
+ TransposeUint16Uint16(s, d, mapping)
+ case []uint32:
+ TransposeUint16Uint32(s, d, mapping)
+ case []uint64:
+ TransposeUint16Uint64(s, d, mapping)
+ }
+ case []uint32:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeUint32Int8(s, d, mapping)
+ case []int16:
+ TransposeUint32Int16(s, d, mapping)
+ case []int32:
+ TransposeUint32Int32(s, d, mapping)
+ case []int64:
+ TransposeUint32Int64(s, d, mapping)
+ case []uint8:
+ TransposeUint32Uint8(s, d, mapping)
+ case []uint16:
+ TransposeUint32Uint16(s, d, mapping)
+ case []uint32:
+ TransposeUint32Uint32(s, d, mapping)
+ case []uint64:
+ TransposeUint32Uint64(s, d, mapping)
+ }
+ case []uint64:
+ switch d := dest.(type) {
+ case []int8:
+ TransposeUint64Int8(s, d, mapping)
+ case []int16:
+ TransposeUint64Int16(s, d, mapping)
+ case []int32:
+ TransposeUint64Int32(s, d, mapping)
+ case []int64:
+ TransposeUint64Int64(s, d, mapping)
+ case []uint8:
+ TransposeUint64Uint8(s, d, mapping)
+ case []uint16:
+ TransposeUint64Uint16(s, d, mapping)
+ case []uint32:
+ TransposeUint64Uint32(s, d, mapping)
+ case []uint64:
+ TransposeUint64Uint64(s, d, mapping)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go
new file mode 100644
index 000000000..461aaf31f
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go
@@ -0,0 +1,96 @@
+// Code generated by transpose_ints_noasm.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build noasm || (!amd64 && !arm64 && !s390x && !ppc64le)
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+var (
+ TransposeInt8Int8 = transposeInt8Int8
+ TransposeInt8Uint8 = transposeInt8Uint8
+ TransposeInt8Int16 = transposeInt8Int16
+ TransposeInt8Uint16 = transposeInt8Uint16
+ TransposeInt8Int32 = transposeInt8Int32
+ TransposeInt8Uint32 = transposeInt8Uint32
+ TransposeInt8Int64 = transposeInt8Int64
+ TransposeInt8Uint64 = transposeInt8Uint64
+
+ TransposeUint8Int8 = transposeUint8Int8
+ TransposeUint8Uint8 = transposeUint8Uint8
+ TransposeUint8Int16 = transposeUint8Int16
+ TransposeUint8Uint16 = transposeUint8Uint16
+ TransposeUint8Int32 = transposeUint8Int32
+ TransposeUint8Uint32 = transposeUint8Uint32
+ TransposeUint8Int64 = transposeUint8Int64
+ TransposeUint8Uint64 = transposeUint8Uint64
+
+ TransposeInt16Int8 = transposeInt16Int8
+ TransposeInt16Uint8 = transposeInt16Uint8
+ TransposeInt16Int16 = transposeInt16Int16
+ TransposeInt16Uint16 = transposeInt16Uint16
+ TransposeInt16Int32 = transposeInt16Int32
+ TransposeInt16Uint32 = transposeInt16Uint32
+ TransposeInt16Int64 = transposeInt16Int64
+ TransposeInt16Uint64 = transposeInt16Uint64
+
+ TransposeUint16Int8 = transposeUint16Int8
+ TransposeUint16Uint8 = transposeUint16Uint8
+ TransposeUint16Int16 = transposeUint16Int16
+ TransposeUint16Uint16 = transposeUint16Uint16
+ TransposeUint16Int32 = transposeUint16Int32
+ TransposeUint16Uint32 = transposeUint16Uint32
+ TransposeUint16Int64 = transposeUint16Int64
+ TransposeUint16Uint64 = transposeUint16Uint64
+
+ TransposeInt32Int8 = transposeInt32Int8
+ TransposeInt32Uint8 = transposeInt32Uint8
+ TransposeInt32Int16 = transposeInt32Int16
+ TransposeInt32Uint16 = transposeInt32Uint16
+ TransposeInt32Int32 = transposeInt32Int32
+ TransposeInt32Uint32 = transposeInt32Uint32
+ TransposeInt32Int64 = transposeInt32Int64
+ TransposeInt32Uint64 = transposeInt32Uint64
+
+ TransposeUint32Int8 = transposeUint32Int8
+ TransposeUint32Uint8 = transposeUint32Uint8
+ TransposeUint32Int16 = transposeUint32Int16
+ TransposeUint32Uint16 = transposeUint32Uint16
+ TransposeUint32Int32 = transposeUint32Int32
+ TransposeUint32Uint32 = transposeUint32Uint32
+ TransposeUint32Int64 = transposeUint32Int64
+ TransposeUint32Uint64 = transposeUint32Uint64
+
+ TransposeInt64Int8 = transposeInt64Int8
+ TransposeInt64Uint8 = transposeInt64Uint8
+ TransposeInt64Int16 = transposeInt64Int16
+ TransposeInt64Uint16 = transposeInt64Uint16
+ TransposeInt64Int32 = transposeInt64Int32
+ TransposeInt64Uint32 = transposeInt64Uint32
+ TransposeInt64Int64 = transposeInt64Int64
+ TransposeInt64Uint64 = transposeInt64Uint64
+
+ TransposeUint64Int8 = transposeUint64Int8
+ TransposeUint64Uint8 = transposeUint64Uint8
+ TransposeUint64Int16 = transposeUint64Int16
+ TransposeUint64Uint16 = transposeUint64Uint16
+ TransposeUint64Int32 = transposeUint64Int32
+ TransposeUint64Uint32 = transposeUint64Uint32
+ TransposeUint64Int64 = transposeUint64Int64
+ TransposeUint64Uint64 = transposeUint64Uint64
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl
new file mode 100644
index 000000000..faffdce35
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl
@@ -0,0 +1,34 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build noasm
+// +build noasm
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+var (
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type -}}
+{{ $srcName := .Name -}}
+{{ range $typelist -}}
+{{ $dest := .Type -}}
+{{ $destName := .Name -}}
+ Transpose{{$srcName}}{{$destName}} = transpose{{$srcName}}{{$destName}}
+{{end}}
+{{end}}
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go
new file mode 100644
index 000000000..cc957cdaa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go
@@ -0,0 +1,96 @@
+// Code generated by transpose_ints_s390x.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+var (
+ TransposeInt8Int8 = transposeInt8Int8
+ TransposeInt8Uint8 = transposeInt8Uint8
+ TransposeInt8Int16 = transposeInt8Int16
+ TransposeInt8Uint16 = transposeInt8Uint16
+ TransposeInt8Int32 = transposeInt8Int32
+ TransposeInt8Uint32 = transposeInt8Uint32
+ TransposeInt8Int64 = transposeInt8Int64
+ TransposeInt8Uint64 = transposeInt8Uint64
+
+ TransposeUint8Int8 = transposeUint8Int8
+ TransposeUint8Uint8 = transposeUint8Uint8
+ TransposeUint8Int16 = transposeUint8Int16
+ TransposeUint8Uint16 = transposeUint8Uint16
+ TransposeUint8Int32 = transposeUint8Int32
+ TransposeUint8Uint32 = transposeUint8Uint32
+ TransposeUint8Int64 = transposeUint8Int64
+ TransposeUint8Uint64 = transposeUint8Uint64
+
+ TransposeInt16Int8 = transposeInt16Int8
+ TransposeInt16Uint8 = transposeInt16Uint8
+ TransposeInt16Int16 = transposeInt16Int16
+ TransposeInt16Uint16 = transposeInt16Uint16
+ TransposeInt16Int32 = transposeInt16Int32
+ TransposeInt16Uint32 = transposeInt16Uint32
+ TransposeInt16Int64 = transposeInt16Int64
+ TransposeInt16Uint64 = transposeInt16Uint64
+
+ TransposeUint16Int8 = transposeUint16Int8
+ TransposeUint16Uint8 = transposeUint16Uint8
+ TransposeUint16Int16 = transposeUint16Int16
+ TransposeUint16Uint16 = transposeUint16Uint16
+ TransposeUint16Int32 = transposeUint16Int32
+ TransposeUint16Uint32 = transposeUint16Uint32
+ TransposeUint16Int64 = transposeUint16Int64
+ TransposeUint16Uint64 = transposeUint16Uint64
+
+ TransposeInt32Int8 = transposeInt32Int8
+ TransposeInt32Uint8 = transposeInt32Uint8
+ TransposeInt32Int16 = transposeInt32Int16
+ TransposeInt32Uint16 = transposeInt32Uint16
+ TransposeInt32Int32 = transposeInt32Int32
+ TransposeInt32Uint32 = transposeInt32Uint32
+ TransposeInt32Int64 = transposeInt32Int64
+ TransposeInt32Uint64 = transposeInt32Uint64
+
+ TransposeUint32Int8 = transposeUint32Int8
+ TransposeUint32Uint8 = transposeUint32Uint8
+ TransposeUint32Int16 = transposeUint32Int16
+ TransposeUint32Uint16 = transposeUint32Uint16
+ TransposeUint32Int32 = transposeUint32Int32
+ TransposeUint32Uint32 = transposeUint32Uint32
+ TransposeUint32Int64 = transposeUint32Int64
+ TransposeUint32Uint64 = transposeUint32Uint64
+
+ TransposeInt64Int8 = transposeInt64Int8
+ TransposeInt64Uint8 = transposeInt64Uint8
+ TransposeInt64Int16 = transposeInt64Int16
+ TransposeInt64Uint16 = transposeInt64Uint16
+ TransposeInt64Int32 = transposeInt64Int32
+ TransposeInt64Uint32 = transposeInt64Uint32
+ TransposeInt64Int64 = transposeInt64Int64
+ TransposeInt64Uint64 = transposeInt64Uint64
+
+ TransposeUint64Int8 = transposeUint64Int8
+ TransposeUint64Uint8 = transposeUint64Uint8
+ TransposeUint64Int16 = transposeUint64Int16
+ TransposeUint64Uint16 = transposeUint64Uint16
+ TransposeUint64Int32 = transposeUint64Int32
+ TransposeUint64Uint32 = transposeUint64Uint32
+ TransposeUint64Int64 = transposeUint64Int64
+ TransposeUint64Uint64 = transposeUint64Uint64
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go
new file mode 100644
index 000000000..cc957cdaa
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go
@@ -0,0 +1,96 @@
+// Code generated by transpose_ints_s390x.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+var (
+ TransposeInt8Int8 = transposeInt8Int8
+ TransposeInt8Uint8 = transposeInt8Uint8
+ TransposeInt8Int16 = transposeInt8Int16
+ TransposeInt8Uint16 = transposeInt8Uint16
+ TransposeInt8Int32 = transposeInt8Int32
+ TransposeInt8Uint32 = transposeInt8Uint32
+ TransposeInt8Int64 = transposeInt8Int64
+ TransposeInt8Uint64 = transposeInt8Uint64
+
+ TransposeUint8Int8 = transposeUint8Int8
+ TransposeUint8Uint8 = transposeUint8Uint8
+ TransposeUint8Int16 = transposeUint8Int16
+ TransposeUint8Uint16 = transposeUint8Uint16
+ TransposeUint8Int32 = transposeUint8Int32
+ TransposeUint8Uint32 = transposeUint8Uint32
+ TransposeUint8Int64 = transposeUint8Int64
+ TransposeUint8Uint64 = transposeUint8Uint64
+
+ TransposeInt16Int8 = transposeInt16Int8
+ TransposeInt16Uint8 = transposeInt16Uint8
+ TransposeInt16Int16 = transposeInt16Int16
+ TransposeInt16Uint16 = transposeInt16Uint16
+ TransposeInt16Int32 = transposeInt16Int32
+ TransposeInt16Uint32 = transposeInt16Uint32
+ TransposeInt16Int64 = transposeInt16Int64
+ TransposeInt16Uint64 = transposeInt16Uint64
+
+ TransposeUint16Int8 = transposeUint16Int8
+ TransposeUint16Uint8 = transposeUint16Uint8
+ TransposeUint16Int16 = transposeUint16Int16
+ TransposeUint16Uint16 = transposeUint16Uint16
+ TransposeUint16Int32 = transposeUint16Int32
+ TransposeUint16Uint32 = transposeUint16Uint32
+ TransposeUint16Int64 = transposeUint16Int64
+ TransposeUint16Uint64 = transposeUint16Uint64
+
+ TransposeInt32Int8 = transposeInt32Int8
+ TransposeInt32Uint8 = transposeInt32Uint8
+ TransposeInt32Int16 = transposeInt32Int16
+ TransposeInt32Uint16 = transposeInt32Uint16
+ TransposeInt32Int32 = transposeInt32Int32
+ TransposeInt32Uint32 = transposeInt32Uint32
+ TransposeInt32Int64 = transposeInt32Int64
+ TransposeInt32Uint64 = transposeInt32Uint64
+
+ TransposeUint32Int8 = transposeUint32Int8
+ TransposeUint32Uint8 = transposeUint32Uint8
+ TransposeUint32Int16 = transposeUint32Int16
+ TransposeUint32Uint16 = transposeUint32Uint16
+ TransposeUint32Int32 = transposeUint32Int32
+ TransposeUint32Uint32 = transposeUint32Uint32
+ TransposeUint32Int64 = transposeUint32Int64
+ TransposeUint32Uint64 = transposeUint32Uint64
+
+ TransposeInt64Int8 = transposeInt64Int8
+ TransposeInt64Uint8 = transposeInt64Uint8
+ TransposeInt64Int16 = transposeInt64Int16
+ TransposeInt64Uint16 = transposeInt64Uint16
+ TransposeInt64Int32 = transposeInt64Int32
+ TransposeInt64Uint32 = transposeInt64Uint32
+ TransposeInt64Int64 = transposeInt64Int64
+ TransposeInt64Uint64 = transposeInt64Uint64
+
+ TransposeUint64Int8 = transposeUint64Int8
+ TransposeUint64Uint8 = transposeUint64Uint8
+ TransposeUint64Int16 = transposeUint64Int16
+ TransposeUint64Uint16 = transposeUint64Uint16
+ TransposeUint64Int32 = transposeUint64Int32
+ TransposeUint64Uint32 = transposeUint64Uint32
+ TransposeUint64Int64 = transposeUint64Int64
+ TransposeUint64Uint64 = transposeUint64Uint64
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl
new file mode 100644
index 000000000..d93c8779c
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl
@@ -0,0 +1,34 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package utils
+
+// if building with the 'noasm' tag, then point to the pure go implementations
+var (
+{{ $typelist := .In }}
+{{range .In}}
+{{ $src := .Type -}}
+{{ $srcName := .Name -}}
+{{ range $typelist -}}
+{{ $dest := .Type -}}
+{{ $destName := .Name -}}
+ Transpose{{$srcName}}{{$destName}} = transpose{{$srcName}}{{$destName}}
+{{end}}
+{{end}}
+)
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl
new file mode 100644
index 000000000..034d0e9d2
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl
@@ -0,0 +1,42 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+// +build !noasm
+
+package utils
+
+import (
+ "unsafe"
+)
+
+{{ $arch := .D.arch}}
+{{ $typelist := .In}}
+{{range .In}}
+{{ $src := .Type }}
+{{ $srcName := .Name }}
+{{ range $typelist}}
+{{ $dest := .Type }}
+{{ $destName := .Name }}
+
+//go:noescape
+func _transpose_{{printf "%s_%s_%s" $src $dest $arch}}(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transpose{{ $srcName }}{{ $destName }}{{ $arch }}(src []{{$src}}, dest []{{$dest}}, transposeMap []int32) {
+ _transpose_{{printf "%s_%s_%s" $src $dest $arch}}(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+{{ end }}
+{{ end }}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go
new file mode 100644
index 000000000..241ca74a7
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go
@@ -0,0 +1,473 @@
+// Code generated by transpose_ints_simd.go.tmpl. DO NOT EDIT.
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !noasm
+
+package utils
+
+import (
+ "unsafe"
+)
+
+//go:noescape
+func _transpose_int8_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int8sse4(src []int8, dest []int8, transposeMap []int32) {
+ _transpose_int8_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint8sse4(src []int8, dest []uint8, transposeMap []int32) {
+ _transpose_int8_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int16sse4(src []int8, dest []int16, transposeMap []int32) {
+ _transpose_int8_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint16sse4(src []int8, dest []uint16, transposeMap []int32) {
+ _transpose_int8_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int32sse4(src []int8, dest []int32, transposeMap []int32) {
+ _transpose_int8_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint32sse4(src []int8, dest []uint32, transposeMap []int32) {
+ _transpose_int8_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Int64sse4(src []int8, dest []int64, transposeMap []int32) {
+ _transpose_int8_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int8_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt8Uint64sse4(src []int8, dest []uint64, transposeMap []int32) {
+ _transpose_int8_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int8sse4(src []uint8, dest []int8, transposeMap []int32) {
+ _transpose_uint8_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint8sse4(src []uint8, dest []uint8, transposeMap []int32) {
+ _transpose_uint8_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int16sse4(src []uint8, dest []int16, transposeMap []int32) {
+ _transpose_uint8_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint16sse4(src []uint8, dest []uint16, transposeMap []int32) {
+ _transpose_uint8_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int32sse4(src []uint8, dest []int32, transposeMap []int32) {
+ _transpose_uint8_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint32sse4(src []uint8, dest []uint32, transposeMap []int32) {
+ _transpose_uint8_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Int64sse4(src []uint8, dest []int64, transposeMap []int32) {
+ _transpose_uint8_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint8_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint8Uint64sse4(src []uint8, dest []uint64, transposeMap []int32) {
+ _transpose_uint8_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int8sse4(src []int16, dest []int8, transposeMap []int32) {
+ _transpose_int16_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint8sse4(src []int16, dest []uint8, transposeMap []int32) {
+ _transpose_int16_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int16sse4(src []int16, dest []int16, transposeMap []int32) {
+ _transpose_int16_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint16sse4(src []int16, dest []uint16, transposeMap []int32) {
+ _transpose_int16_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int32sse4(src []int16, dest []int32, transposeMap []int32) {
+ _transpose_int16_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint32sse4(src []int16, dest []uint32, transposeMap []int32) {
+ _transpose_int16_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Int64sse4(src []int16, dest []int64, transposeMap []int32) {
+ _transpose_int16_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int16_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt16Uint64sse4(src []int16, dest []uint64, transposeMap []int32) {
+ _transpose_int16_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int8sse4(src []uint16, dest []int8, transposeMap []int32) {
+ _transpose_uint16_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint8sse4(src []uint16, dest []uint8, transposeMap []int32) {
+ _transpose_uint16_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int16sse4(src []uint16, dest []int16, transposeMap []int32) {
+ _transpose_uint16_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint16sse4(src []uint16, dest []uint16, transposeMap []int32) {
+ _transpose_uint16_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int32sse4(src []uint16, dest []int32, transposeMap []int32) {
+ _transpose_uint16_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint32sse4(src []uint16, dest []uint32, transposeMap []int32) {
+ _transpose_uint16_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Int64sse4(src []uint16, dest []int64, transposeMap []int32) {
+ _transpose_uint16_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint16_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint16Uint64sse4(src []uint16, dest []uint64, transposeMap []int32) {
+ _transpose_uint16_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int8sse4(src []int32, dest []int8, transposeMap []int32) {
+ _transpose_int32_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint8sse4(src []int32, dest []uint8, transposeMap []int32) {
+ _transpose_int32_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int16sse4(src []int32, dest []int16, transposeMap []int32) {
+ _transpose_int32_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint16sse4(src []int32, dest []uint16, transposeMap []int32) {
+ _transpose_int32_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int32sse4(src []int32, dest []int32, transposeMap []int32) {
+ _transpose_int32_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint32sse4(src []int32, dest []uint32, transposeMap []int32) {
+ _transpose_int32_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Int64sse4(src []int32, dest []int64, transposeMap []int32) {
+ _transpose_int32_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int32_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt32Uint64sse4(src []int32, dest []uint64, transposeMap []int32) {
+ _transpose_int32_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int8sse4(src []uint32, dest []int8, transposeMap []int32) {
+ _transpose_uint32_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint8sse4(src []uint32, dest []uint8, transposeMap []int32) {
+ _transpose_uint32_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int16sse4(src []uint32, dest []int16, transposeMap []int32) {
+ _transpose_uint32_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint16sse4(src []uint32, dest []uint16, transposeMap []int32) {
+ _transpose_uint32_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int32sse4(src []uint32, dest []int32, transposeMap []int32) {
+ _transpose_uint32_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint32sse4(src []uint32, dest []uint32, transposeMap []int32) {
+ _transpose_uint32_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Int64sse4(src []uint32, dest []int64, transposeMap []int32) {
+ _transpose_uint32_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint32_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint32Uint64sse4(src []uint32, dest []uint64, transposeMap []int32) {
+ _transpose_uint32_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int8sse4(src []int64, dest []int8, transposeMap []int32) {
+ _transpose_int64_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint8sse4(src []int64, dest []uint8, transposeMap []int32) {
+ _transpose_int64_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int16sse4(src []int64, dest []int16, transposeMap []int32) {
+ _transpose_int64_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint16sse4(src []int64, dest []uint16, transposeMap []int32) {
+ _transpose_int64_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int32sse4(src []int64, dest []int32, transposeMap []int32) {
+ _transpose_int64_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint32sse4(src []int64, dest []uint32, transposeMap []int32) {
+ _transpose_int64_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Int64sse4(src []int64, dest []int64, transposeMap []int32) {
+ _transpose_int64_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_int64_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeInt64Uint64sse4(src []int64, dest []uint64, transposeMap []int32) {
+ _transpose_int64_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int8sse4(src []uint64, dest []int8, transposeMap []int32) {
+ _transpose_uint64_int8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint8_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint8sse4(src []uint64, dest []uint8, transposeMap []int32) {
+ _transpose_uint64_uint8_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int16sse4(src []uint64, dest []int16, transposeMap []int32) {
+ _transpose_uint64_int16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint16_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint16sse4(src []uint64, dest []uint16, transposeMap []int32) {
+ _transpose_uint64_uint16_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int32sse4(src []uint64, dest []int32, transposeMap []int32) {
+ _transpose_uint64_int32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint32_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint32sse4(src []uint64, dest []uint32, transposeMap []int32) {
+ _transpose_uint64_uint32_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_int64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Int64sse4(src []uint64, dest []int64, transposeMap []int32) {
+ _transpose_uint64_int64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
+
+//go:noescape
+func _transpose_uint64_uint64_sse4(src, dest unsafe.Pointer, length int, transposeMap unsafe.Pointer)
+
+func transposeUint64Uint64sse4(src []uint64, dest []uint64, transposeMap []int32) {
+ _transpose_uint64_uint64_sse4(unsafe.Pointer(&src[0]), unsafe.Pointer(&dest[0]), len(dest), unsafe.Pointer(&transposeMap[0]))
+}
diff --git a/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s
new file mode 100644
index 000000000..ee5199a5a
--- /dev/null
+++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s
@@ -0,0 +1,3074 @@
+//+build !noasm !appengine
+// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
+
+TEXT ·_transpose_uint8_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB0_1
+
+LBB0_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB0_5
+
+LBB0_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB0_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB0_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB0_3
+
+LBB0_4:
+ RET
+
+TEXT ·_transpose_int8_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB1_1
+
+LBB1_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB1_5
+
+LBB1_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB1_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB1_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB1_3
+
+LBB1_4:
+ RET
+
+TEXT ·_transpose_uint16_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB2_1
+
+LBB2_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB2_5
+
+LBB2_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB2_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB2_3:
+ LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB2_3
+
+LBB2_4:
+ RET
+
+TEXT ·_transpose_int16_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB3_1
+
+LBB3_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB3_5
+
+LBB3_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB3_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB3_3:
+ LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB3_3
+
+LBB3_4:
+ RET
+
+TEXT ·_transpose_uint32_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB4_1
+
+LBB4_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB4_5
+
+LBB4_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB4_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB4_3:
+ LONG $0x87048b42 // mov eax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB4_3
+
+LBB4_4:
+ RET
+
+TEXT ·_transpose_int32_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB5_1
+
+LBB5_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB5_5
+
+LBB5_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB5_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB5_3:
+ LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB5_3
+
+LBB5_4:
+ RET
+
+TEXT ·_transpose_uint64_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB6_1
+
+LBB6_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB6_5
+
+LBB6_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB6_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB6_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB6_3
+
+LBB6_4:
+ RET
+
+TEXT ·_transpose_int64_uint8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB7_1
+
+LBB7_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB7_5
+
+LBB7_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB7_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB7_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB7_3
+
+LBB7_4:
+ RET
+
+TEXT ·_transpose_uint8_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB8_1
+
+LBB8_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB8_5
+
+LBB8_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB8_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB8_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB8_3
+
+LBB8_4:
+ RET
+
+TEXT ·_transpose_int8_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB9_1
+
+LBB9_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB9_5
+
+LBB9_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB9_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB9_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB9_3
+
+LBB9_4:
+ RET
+
+TEXT ·_transpose_uint16_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB10_1
+
+LBB10_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB10_5
+
+LBB10_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB10_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB10_3:
+ LONG $0x04b70f42; BYTE $0x47 // movzx eax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB10_3
+
+LBB10_4:
+ RET
+
+TEXT ·_transpose_int16_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB11_1
+
+LBB11_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB11_5
+
+LBB11_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB11_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB11_3:
+ LONG $0x04bf0f4a; BYTE $0x47 // movsx rax, word [rdi + 2*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB11_3
+
+LBB11_4:
+ RET
+
+TEXT ·_transpose_uint32_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB12_1
+
+LBB12_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB12_5
+
+LBB12_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB12_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB12_3:
+ LONG $0x87048b42 // mov eax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB12_3
+
+LBB12_4:
+ RET
+
+TEXT ·_transpose_int32_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB13_1
+
+LBB13_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB13_5
+
+LBB13_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB13_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB13_3:
+ LONG $0x8704634a // movsxd rax, dword [rdi + 4*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB13_3
+
+LBB13_4:
+ RET
+
+TEXT ·_transpose_uint64_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB14_1
+
+LBB14_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB14_5
+
+LBB14_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB14_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB14_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB14_3
+
+LBB14_4:
+ RET
+
+TEXT ·_transpose_int64_int8_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB15_1
+
+LBB15_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x1688 // mov byte [rsi], dl
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x01 // mov byte [rsi + 1], dl
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x02 // mov byte [rsi + 2], dl
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b60f // movzx edx, byte [rcx + 4*rdx]
+ WORD $0x5688; BYTE $0x03 // mov byte [rsi + 3], dl
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x04c68348 // add rsi, 4
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB15_5
+
+LBB15_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB15_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB15_3:
+ LONG $0xc7048b4a // mov rax, qword [rdi + 8*r8]
+ LONG $0x8104b60f // movzx eax, byte [rcx + 4*rax]
+ LONG $0x06048842 // mov byte [rsi + r8], al
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB15_3
+
+LBB15_4:
+ RET
+
+TEXT ·_transpose_uint8_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB16_1
+
+LBB16_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB16_5
+
+LBB16_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB16_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB16_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB16_3
+
+LBB16_4:
+ RET
+
+TEXT ·_transpose_int8_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB17_1
+
+LBB17_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB17_5
+
+LBB17_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB17_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB17_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB17_3
+
+LBB17_4:
+ RET
+
+TEXT ·_transpose_uint16_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB18_1
+
+LBB18_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB18_5
+
+LBB18_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB18_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB18_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB18_3
+
+LBB18_4:
+ RET
+
+TEXT ·_transpose_int16_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB19_1
+
+LBB19_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB19_5
+
+LBB19_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB19_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB19_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB19_3
+
+LBB19_4:
+ RET
+
+TEXT ·_transpose_uint32_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB20_1
+
+LBB20_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB20_5
+
+LBB20_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB20_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB20_3:
+ LONG $0x47048b42 // mov eax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB20_3
+
+LBB20_4:
+ RET
+
+TEXT ·_transpose_int32_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB21_1
+
+LBB21_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB21_5
+
+LBB21_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB21_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB21_3:
+ LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB21_3
+
+LBB21_4:
+ RET
+
+TEXT ·_transpose_uint64_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB22_1
+
+LBB22_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB22_5
+
+LBB22_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB22_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB22_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB22_3
+
+LBB22_4:
+ RET
+
+TEXT ·_transpose_int64_uint16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB23_1
+
+LBB23_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB23_5
+
+LBB23_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB23_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB23_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB23_3
+
+LBB23_4:
+ RET
+
+TEXT ·_transpose_uint8_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB24_1
+
+LBB24_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB24_5
+
+LBB24_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB24_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB24_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB24_3
+
+LBB24_4:
+ RET
+
+TEXT ·_transpose_int8_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB25_1
+
+LBB25_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB25_5
+
+LBB25_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB25_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB25_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x46 // mov word [rsi + 2*r8], ax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB25_3
+
+LBB25_4:
+ RET
+
+TEXT ·_transpose_uint16_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB26_1
+
+LBB26_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB26_5
+
+LBB26_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB26_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB26_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB26_3
+
+LBB26_4:
+ RET
+
+TEXT ·_transpose_int16_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB27_1
+
+LBB27_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB27_5
+
+LBB27_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB27_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB27_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB27_3
+
+LBB27_4:
+ RET
+
+TEXT ·_transpose_uint32_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB28_1
+
+LBB28_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB28_5
+
+LBB28_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB28_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB28_3:
+ LONG $0x47048b42 // mov eax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB28_3
+
+LBB28_4:
+ RET
+
+TEXT ·_transpose_int32_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB29_1
+
+LBB29_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB29_5
+
+LBB29_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB29_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB29_3:
+ LONG $0x4704634a // movsxd rax, dword [rdi + 2*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB29_3
+
+LBB29_4:
+ RET
+
+TEXT ·_transpose_uint64_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB30_1
+
+LBB30_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB30_5
+
+LBB30_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB30_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB30_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB30_3
+
+LBB30_4:
+ RET
+
+TEXT ·_transpose_int64_int16_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB31_1
+
+LBB31_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ WORD $0x8966; BYTE $0x16 // mov word [rsi], dx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x02568966 // mov word [rsi + 2], dx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x04568966 // mov word [rsi + 4], dx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x9114b70f // movzx edx, word [rcx + 4*rdx]
+ LONG $0x06568966 // mov word [rsi + 6], dx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x08c68348 // add rsi, 8
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB31_5
+
+LBB31_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB31_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB31_3:
+ LONG $0x87048b4a // mov rax, qword [rdi + 4*r8]
+ LONG $0x8104b70f // movzx eax, word [rcx + 4*rax]
+ LONG $0x04894266; BYTE $0x06 // mov word [rsi + r8], ax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB31_3
+
+LBB31_4:
+ RET
+
+TEXT ·_transpose_uint8_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB32_1
+
+LBB32_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB32_5
+
+LBB32_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB32_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB32_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB32_3
+
+LBB32_4:
+ RET
+
+TEXT ·_transpose_int8_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB33_1
+
+LBB33_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB33_5
+
+LBB33_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB33_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB33_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB33_3
+
+LBB33_4:
+ RET
+
+TEXT ·_transpose_uint16_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB34_1
+
+LBB34_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB34_5
+
+LBB34_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB34_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB34_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB34_3
+
+LBB34_4:
+ RET
+
+TEXT ·_transpose_int16_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB35_1
+
+LBB35_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB35_5
+
+LBB35_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB35_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB35_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB35_3
+
+LBB35_4:
+ RET
+
+TEXT ·_transpose_uint32_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB36_1
+
+LBB36_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB36_5
+
+LBB36_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB36_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB36_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB36_3
+
+LBB36_4:
+ RET
+
+TEXT ·_transpose_int32_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB37_1
+
+LBB37_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB37_5
+
+LBB37_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB37_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB37_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB37_3
+
+LBB37_4:
+ RET
+
+TEXT ·_transpose_uint64_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB38_1
+
+LBB38_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB38_5
+
+LBB38_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB38_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB38_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB38_3
+
+LBB38_4:
+ RET
+
+TEXT ·_transpose_int64_uint32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB39_1
+
+LBB39_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB39_5
+
+LBB39_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB39_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB39_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB39_3
+
+LBB39_4:
+ RET
+
+TEXT ·_transpose_uint8_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB40_1
+
+LBB40_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB40_5
+
+LBB40_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB40_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB40_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB40_3
+
+LBB40_4:
+ RET
+
+TEXT ·_transpose_int8_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB41_1
+
+LBB41_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB41_5
+
+LBB41_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB41_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB41_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x86048942 // mov dword [rsi + 4*r8], eax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB41_3
+
+LBB41_4:
+ RET
+
+TEXT ·_transpose_uint16_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB42_1
+
+LBB42_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB42_5
+
+LBB42_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB42_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB42_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB42_3
+
+LBB42_4:
+ RET
+
+TEXT ·_transpose_int16_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB43_1
+
+LBB43_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB43_5
+
+LBB43_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB43_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB43_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x46048942 // mov dword [rsi + 2*r8], eax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB43_3
+
+LBB43_4:
+ RET
+
+TEXT ·_transpose_uint32_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB44_1
+
+LBB44_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB44_5
+
+LBB44_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB44_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB44_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB44_3
+
+LBB44_4:
+ RET
+
+TEXT ·_transpose_int32_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB45_1
+
+LBB45_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB45_5
+
+LBB45_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB45_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB45_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB45_3
+
+LBB45_4:
+ RET
+
+TEXT ·_transpose_uint64_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB46_1
+
+LBB46_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB46_5
+
+LBB46_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB46_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB46_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB46_3
+
+LBB46_4:
+ RET
+
+TEXT ·_transpose_int64_int32_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB47_1
+
+LBB47_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x1689 // mov dword [rsi], edx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x04 // mov dword [rsi + 4], edx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x08 // mov dword [rsi + 8], edx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ WORD $0x148b; BYTE $0x91 // mov edx, dword [rcx + 4*rdx]
+ WORD $0x5689; BYTE $0x0c // mov dword [rsi + 12], edx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x10c68348 // add rsi, 16
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB47_5
+
+LBB47_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB47_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB47_3:
+ LONG $0x47048b4a // mov rax, qword [rdi + 2*r8]
+ WORD $0x048b; BYTE $0x81 // mov eax, dword [rcx + 4*rax]
+ LONG $0x06048942 // mov dword [rsi + r8], eax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB47_3
+
+LBB47_4:
+ RET
+
+TEXT ·_transpose_uint8_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB48_1
+
+LBB48_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB48_5
+
+LBB48_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB48_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB48_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB48_3
+
+LBB48_4:
+ RET
+
+TEXT ·_transpose_int8_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB49_1
+
+LBB49_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB49_5
+
+LBB49_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB49_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB49_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB49_3
+
+LBB49_4:
+ RET
+
+TEXT ·_transpose_uint16_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB50_1
+
+LBB50_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB50_5
+
+LBB50_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB50_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB50_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB50_3
+
+LBB50_4:
+ RET
+
+TEXT ·_transpose_int16_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB51_1
+
+LBB51_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB51_5
+
+LBB51_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB51_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB51_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB51_3
+
+LBB51_4:
+ RET
+
+TEXT ·_transpose_uint32_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB52_1
+
+LBB52_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB52_5
+
+LBB52_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB52_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB52_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB52_3
+
+LBB52_4:
+ RET
+
+TEXT ·_transpose_int32_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB53_1
+
+LBB53_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB53_5
+
+LBB53_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB53_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB53_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB53_3
+
+LBB53_4:
+ RET
+
+TEXT ·_transpose_uint64_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB54_1
+
+LBB54_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB54_5
+
+LBB54_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB54_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB54_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB54_3
+
+LBB54_4:
+ RET
+
+TEXT ·_transpose_int64_uint64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB55_1
+
+LBB55_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB55_5
+
+LBB55_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB55_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB55_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB55_3
+
+LBB55_4:
+ RET
+
+TEXT ·_transpose_uint8_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB56_1
+
+LBB56_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb60f; BYTE $0x17 // movzx edx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0157b60f // movzx edx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0257b60f // movzx edx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0357b60f // movzx edx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB56_5
+
+LBB56_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB56_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB56_3:
+ LONG $0x04b60f42; BYTE $0x07 // movzx eax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB56_3
+
+LBB56_4:
+ RET
+
+TEXT ·_transpose_int8_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB57_1
+
+LBB57_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17be0f48 // movsx rdx, byte [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57be0f48; BYTE $0x01 // movsx rdx, byte [rdi + 1]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57be0f48; BYTE $0x02 // movsx rdx, byte [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57be0f48; BYTE $0x03 // movsx rdx, byte [rdi + 3]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x04c78348 // add rdi, 4
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB57_5
+
+LBB57_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB57_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB57_3:
+ LONG $0x04be0f4a; BYTE $0x07 // movsx rax, byte [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0xc604894a // mov qword [rsi + 8*r8], rax
+ LONG $0x01c08349 // add r8, 1
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB57_3
+
+LBB57_4:
+ RET
+
+TEXT ·_transpose_uint16_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB58_1
+
+LBB58_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0xb70f; BYTE $0x17 // movzx edx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x0257b70f // movzx edx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x0457b70f // movzx edx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0657b70f // movzx edx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB58_5
+
+LBB58_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB58_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB58_3:
+ LONG $0x04b70f42; BYTE $0x07 // movzx eax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB58_3
+
+LBB58_4:
+ RET
+
+TEXT ·_transpose_int16_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB59_1
+
+LBB59_5:
+ WORD $0xd089 // mov eax, edx
+ LONG $0x17bf0f48 // movsx rdx, word [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x57bf0f48; BYTE $0x02 // movsx rdx, word [rdi + 2]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x57bf0f48; BYTE $0x04 // movsx rdx, word [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x57bf0f48; BYTE $0x06 // movsx rdx, word [rdi + 6]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x08c78348 // add rdi, 8
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB59_5
+
+LBB59_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB59_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB59_3:
+ LONG $0x04bf0f4a; BYTE $0x07 // movsx rax, word [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x8604894a // mov qword [rsi + 4*r8], rax
+ LONG $0x02c08349 // add r8, 2
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB59_3
+
+LBB59_4:
+ RET
+
+TEXT ·_transpose_uint32_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB60_1
+
+LBB60_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x178b // mov edx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ WORD $0x578b; BYTE $0x04 // mov edx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ WORD $0x578b; BYTE $0x08 // mov edx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ WORD $0x578b; BYTE $0x0c // mov edx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB60_5
+
+LBB60_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB60_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB60_3:
+ LONG $0x07048b42 // mov eax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB60_3
+
+LBB60_4:
+ RET
+
+TEXT ·_transpose_int32_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB61_1
+
+LBB61_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x6348; BYTE $0x17 // movsxd rdx, dword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x04576348 // movsxd rdx, dword [rdi + 4]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x08576348 // movsxd rdx, dword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x0c576348 // movsxd rdx, dword [rdi + 12]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x10c78348 // add rdi, 16
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB61_5
+
+LBB61_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB61_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB61_3:
+ LONG $0x0704634a // movsxd rax, dword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x4604894a // mov qword [rsi + 2*r8], rax
+ LONG $0x04c08349 // add r8, 4
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB61_3
+
+LBB61_4:
+ RET
+
+TEXT ·_transpose_uint64_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB62_1
+
+LBB62_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB62_5
+
+LBB62_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB62_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB62_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB62_3
+
+LBB62_4:
+ RET
+
+TEXT ·_transpose_int64_int64_sse4(SB), $0-32
+
+ MOVQ src+0(FP), DI
+ MOVQ dest+8(FP), SI
+ MOVQ length+16(FP), DX
+ MOVQ transposeMap+24(FP), CX
+
+ WORD $0xfa83; BYTE $0x04 // cmp edx, 4
+ JL LBB63_1
+
+LBB63_5:
+ WORD $0xd089 // mov eax, edx
+ WORD $0x8b48; BYTE $0x17 // mov rdx, qword [rdi]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ WORD $0x8948; BYTE $0x16 // mov qword [rsi], rdx
+ LONG $0x08578b48 // mov rdx, qword [rdi + 8]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x08568948 // mov qword [rsi + 8], rdx
+ LONG $0x10578b48 // mov rdx, qword [rdi + 16]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x10568948 // mov qword [rsi + 16], rdx
+ LONG $0x18578b48 // mov rdx, qword [rdi + 24]
+ LONG $0x91146348 // movsxd rdx, dword [rcx + 4*rdx]
+ LONG $0x18568948 // mov qword [rsi + 24], rdx
+ WORD $0x508d; BYTE $0xfc // lea edx, [rax - 4]
+ LONG $0x20c78348 // add rdi, 32
+ LONG $0x20c68348 // add rsi, 32
+ WORD $0xf883; BYTE $0x07 // cmp eax, 7
+ JG LBB63_5
+
+LBB63_1:
+ WORD $0xd285 // test edx, edx
+ JLE LBB63_4
+ WORD $0xc283; BYTE $0x01 // add edx, 1
+ WORD $0x3145; BYTE $0xc0 // xor r8d, r8d
+
+LBB63_3:
+ LONG $0x07048b4a // mov rax, qword [rdi + r8]
+ LONG $0x81046348 // movsxd rax, dword [rcx + 4*rax]
+ LONG $0x0604894a // mov qword [rsi + r8], rax
+ LONG $0x08c08349 // add r8, 8
+ WORD $0xc283; BYTE $0xff // add edx, -1
+ WORD $0xfa83; BYTE $0x01 // cmp edx, 1
+ JG LBB63_3
+
+LBB63_4:
+ RET