aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/cloud.google.com/go/bigquery
diff options
context:
space:
mode:
authorTaras Madan <tarasmadan@google.com>2024-09-10 12:16:33 +0200
committerTaras Madan <tarasmadan@google.com>2024-09-10 14:05:26 +0000
commitc97c816133b42257d0bcf1ee4bd178bb2a7a2b9e (patch)
tree0bcbc2e540bbf8f62f6c17887cdd53b8c2cee637 /vendor/cloud.google.com/go/bigquery
parent54e657429ab892ad06c90cd7c1a4eb33ba93a3dc (diff)
vendor: update
Diffstat (limited to 'vendor/cloud.google.com/go/bigquery')
-rw-r--r--vendor/cloud.google.com/go/bigquery/CHANGES.md45
-rw-r--r--vendor/cloud.google.com/go/bigquery/arrow.go23
-rw-r--r--vendor/cloud.google.com/go/bigquery/bigquery.go10
-rw-r--r--vendor/cloud.google.com/go/bigquery/doc.go39
-rw-r--r--vendor/cloud.google.com/go/bigquery/internal/version.go2
-rw-r--r--vendor/cloud.google.com/go/bigquery/iterator.go8
-rw-r--r--vendor/cloud.google.com/go/bigquery/job.go4
-rw-r--r--vendor/cloud.google.com/go/bigquery/load.go28
-rw-r--r--vendor/cloud.google.com/go/bigquery/params.go80
-rw-r--r--vendor/cloud.google.com/go/bigquery/rangevalue.go28
-rw-r--r--vendor/cloud.google.com/go/bigquery/schema.go72
-rw-r--r--vendor/cloud.google.com/go/bigquery/standardsql.go24
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_read_client.go4
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go4
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/annotations.pb.go6
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/arrow.pb.go14
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/avro.pb.go14
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/protobuf.pb.go12
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go58
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go22
-rw-r--r--vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/table.pb.go14
-rw-r--r--vendor/cloud.google.com/go/bigquery/value.go69
22 files changed, 481 insertions, 99 deletions
diff --git a/vendor/cloud.google.com/go/bigquery/CHANGES.md b/vendor/cloud.google.com/go/bigquery/CHANGES.md
index bd19d2656..3d7996aee 100644
--- a/vendor/cloud.google.com/go/bigquery/CHANGES.md
+++ b/vendor/cloud.google.com/go/bigquery/CHANGES.md
@@ -3,6 +3,51 @@
+## [1.62.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.61.0...bigquery/v1.62.0) (2024-07-22)
+
+
+### Features
+
+* **bigquery/analyticshub:** Support Direct Table Access Toggle (Egress GA) ([b660d68](https://github.com/googleapis/google-cloud-go/commit/b660d6870658fe6881883785bcebaea0929fec0a))
+* **bigquery/analyticshub:** Support public directory self service for Listings/Exchanges ([#10485](https://github.com/googleapis/google-cloud-go/issues/10485)) ([b660d68](https://github.com/googleapis/google-cloud-go/commit/b660d6870658fe6881883785bcebaea0929fec0a))
+* **bigquery:** Add rounding mode to FieldSchema ([#10328](https://github.com/googleapis/google-cloud-go/issues/10328)) ([1a9e204](https://github.com/googleapis/google-cloud-go/commit/1a9e204d7752c5bfe9edfd7bc7ee36c5b1385783))
+* **bigquery:** Json support on managedwriter/adapt pkg ([#10542](https://github.com/googleapis/google-cloud-go/issues/10542)) ([978d4a1](https://github.com/googleapis/google-cloud-go/commit/978d4a1e47cbd8d4bf567b616381a2f12fac4cab))
+* **bigquery:** Support column name character map in load jobs ([#10425](https://github.com/googleapis/google-cloud-go/issues/10425)) ([b829327](https://github.com/googleapis/google-cloud-go/commit/b82932789af82b5e0799c20a096aab98132b5eb1))
+
+
+### Bug Fixes
+
+* **bigquery/storage/managedwriter:** Faster context failure on send ([#10169](https://github.com/googleapis/google-cloud-go/issues/10169)) ([1fb0e64](https://github.com/googleapis/google-cloud-go/commit/1fb0e6401d584bf8ede60a170b4d82dc211010b8))
+* **bigquery:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **bigquery:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+* **bigquery:** Empty slice instead of nil slice for primitive repeated fields ([#7315](https://github.com/googleapis/google-cloud-go/issues/7315)) ([b371210](https://github.com/googleapis/google-cloud-go/commit/b3712100831061fea8605e574d482d7f768ecf14))
+* **bigquery:** Reduce default backoffs ([#10558](https://github.com/googleapis/google-cloud-go/issues/10558)) ([037e9ef](https://github.com/googleapis/google-cloud-go/commit/037e9efa929ad9f8d6f725b28ec8096c3e536b76))
+
+
+### Documentation
+
+* **bigquery/analyticshub:** A comment for message `DataExchange` is changed ([b660d68](https://github.com/googleapis/google-cloud-go/commit/b660d6870658fe6881883785bcebaea0929fec0a))
+* **bigquery/analyticshub:** A comment for message `Listing` is changed ([b660d68](https://github.com/googleapis/google-cloud-go/commit/b660d6870658fe6881883785bcebaea0929fec0a))
+* **bigquery/datatransfer:** Update OAuth links in `CreateTransferConfigRequest` and `UpdateTransferConfigRequest` ([3df3c04](https://github.com/googleapis/google-cloud-go/commit/3df3c04f0dffad3fa2fe272eb7b2c263801b9ada))
+* **bigquery:** Improve Inserter and StructSaver godoc ([#10170](https://github.com/googleapis/google-cloud-go/issues/10170)) ([c1cffb6](https://github.com/googleapis/google-cloud-go/commit/c1cffb63c33ae49f3a705bd0bc7a32cd2b0319bc))
+* **bigquery:** Update description of query preview feature ([#10554](https://github.com/googleapis/google-cloud-go/issues/10554)) ([25c5cbe](https://github.com/googleapis/google-cloud-go/commit/25c5cbe6f31d62fdea1455889ac2e336d1287615))
+
+## [1.61.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.60.0...bigquery/v1.61.0) (2024-04-24)
+
+
+### Features
+
+* **bigquery/storage/managedwriter/adapt:** Add RANGE support to adapt ([#9836](https://github.com/googleapis/google-cloud-go/issues/9836)) ([ae25253](https://github.com/googleapis/google-cloud-go/commit/ae252533375b21dd41a0ea13e85462bbcad291af))
+* **bigquery:** RANGE support for basic data movement ([#9762](https://github.com/googleapis/google-cloud-go/issues/9762)) ([07f0806](https://github.com/googleapis/google-cloud-go/commit/07f0806a945c2cf0fbc431b63d9c8a30ed3a22fd))
+* **bigquery:** RANGE support when reading Arrow format ([#9795](https://github.com/googleapis/google-cloud-go/issues/9795)) ([da245fa](https://github.com/googleapis/google-cloud-go/commit/da245fac5ee335e86c63bfa5f165b0ab84960847))
+* **bigquery:** RANGE type StandardSQLDataType support ([#9754](https://github.com/googleapis/google-cloud-go/issues/9754)) ([33666cf](https://github.com/googleapis/google-cloud-go/commit/33666cfeaefdebc474045894af069ca7172e836b))
+
+
+### Bug Fixes
+
+* **bigquery/datatransfer:** Mark parent/name fields with the REQUIRED field_behavior annotation ([8892943](https://github.com/googleapis/google-cloud-go/commit/8892943b169060f8ba7be227cd65680696c494a0))
+* **bigquery:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
+
## [1.60.0](https://github.com/googleapis/google-cloud-go/compare/bigquery/v1.59.1...bigquery/v1.60.0) (2024-03-27)
diff --git a/vendor/cloud.google.com/go/bigquery/arrow.go b/vendor/cloud.google.com/go/bigquery/arrow.go
index 1e8da9a41..825256dec 100644
--- a/vendor/cloud.google.com/go/bigquery/arrow.go
+++ b/vendor/cloud.google.com/go/bigquery/arrow.go
@@ -23,10 +23,10 @@ import (
"math/big"
"cloud.google.com/go/civil"
- "github.com/apache/arrow/go/v14/arrow"
- "github.com/apache/arrow/go/v14/arrow/array"
- "github.com/apache/arrow/go/v14/arrow/ipc"
- "github.com/apache/arrow/go/v14/arrow/memory"
+ "github.com/apache/arrow/go/v15/arrow"
+ "github.com/apache/arrow/go/v15/arrow/array"
+ "github.com/apache/arrow/go/v15/arrow/ipc"
+ "github.com/apache/arrow/go/v15/arrow/memory"
"google.golang.org/api/iterator"
)
@@ -272,6 +272,21 @@ func convertArrowValue(col arrow.Array, i int, ft arrow.DataType, fs *FieldSchem
arr := col.(*array.Struct)
nestedValues := []Value{}
fields := ft.(*arrow.StructType).Fields()
+ if fs.Type == RangeFieldType {
+ rangeFieldSchema := &FieldSchema{
+ Type: fs.RangeElementType.Type,
+ }
+ start, err := convertArrowValue(arr.Field(0), i, fields[0].Type, rangeFieldSchema)
+ if err != nil {
+ return nil, err
+ }
+ end, err := convertArrowValue(arr.Field(1), i, fields[1].Type, rangeFieldSchema)
+ if err != nil {
+ return nil, err
+ }
+ rangeValue := &RangeValue{Start: start, End: end}
+ return Value(rangeValue), nil
+ }
for fIndex, f := range fields {
v, err := convertArrowValue(arr.Field(fIndex), i, f.Type, fs.Schema[fIndex])
if err != nil {
diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go
index c597679bc..fce568d7a 100644
--- a/vendor/cloud.google.com/go/bigquery/bigquery.go
+++ b/vendor/cloud.google.com/go/bigquery/bigquery.go
@@ -84,7 +84,7 @@ const DetectProjectID = "*detect-project-id*"
// variables. By setting the environment variable QUERY_PREVIEW_ENABLED to the string
// "TRUE", the client will enable preview features, though behavior may still be
// controlled via the bigquery service as well. Currently, the feature(s) in scope
-// include: stateless queries (query execution without corresponding job metadata).
+// include: short mode queries (query execution without corresponding job metadata).
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
o := []option.ClientOption{
option.WithScopes(Scope),
@@ -249,8 +249,12 @@ func runWithRetryExplicit(ctx context.Context, call func() error, allowedReasons
var (
defaultRetryReasons = []string{"backendError", "rateLimitExceeded"}
- jobRetryReasons = []string{"backendError", "rateLimitExceeded", "internalError"}
- retry5xxCodes = []int{
+
+ // These reasons are used exclusively for enqueuing jobs (jobs.insert and jobs.query).
+ // Using them for polling may cause unwanted retries until context deadline/cancellation/etc.
+ jobRetryReasons = []string{"backendError", "rateLimitExceeded", "jobRateLimitExceeded", "internalError"}
+
+ retry5xxCodes = []int{
http.StatusInternalServerError,
http.StatusBadGateway,
http.StatusServiceUnavailable,
diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go
index 37d116a31..c3e2819e0 100644
--- a/vendor/cloud.google.com/go/bigquery/doc.go
+++ b/vendor/cloud.google.com/go/bigquery/doc.go
@@ -260,6 +260,21 @@ it as well, and call its Run method.
To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Inserter, and call its Put method with a slice of values.
+ type Item struct {
+ Name string
+ Size float64
+ Count int
+ }
+
+ // Save implements the ValueSaver interface.
+ func (i *Item) Save() (map[string]bigquery.Value, string, error) {
+ return map[string]bigquery.Value{
+ "Name": i.Name,
+ "Size": i.Size,
+ "Count": i.Count,
+ }, "", nil
+ }
+
u := table.Inserter()
// Item implements the ValueSaver interface.
items := []*Item{
@@ -272,15 +287,33 @@ Then create an Inserter, and call its Put method with a slice of values.
}
You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
-to specify the schema and insert ID by hand, or just supply the struct or struct pointer
-directly and the schema will be inferred:
+to specify the schema and insert ID by hand:
+
+ type item struct {
+ Name string
+ Num int
+ }
+
+ // Assume schema holds the table's schema.
+ savers := []*bigquery.StructSaver{
+ {Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
+ {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
+ {Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
+ }
+
+ if err := u.Put(ctx, savers); err != nil {
+ // TODO: Handle error.
+ }
+
+Lastly, but not least, you can just supply the struct or struct pointer directly and the schema will be inferred:
type Item2 struct {
Name string
Size float64
Count int
}
- // Item implements the ValueSaver interface.
+
+ // Item2 doesn't implement ValueSaver interface, so schema will be inferred.
items2 := []*Item2{
{Name: "n1", Size: 32.6, Count: 7},
{Name: "n2", Size: 4, Count: 2},
diff --git a/vendor/cloud.google.com/go/bigquery/internal/version.go b/vendor/cloud.google.com/go/bigquery/internal/version.go
index 5145d8203..eba46c4b9 100644
--- a/vendor/cloud.google.com/go/bigquery/internal/version.go
+++ b/vendor/cloud.google.com/go/bigquery/internal/version.go
@@ -16,4 +16,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.60.0"
+const Version = "1.62.0"
diff --git a/vendor/cloud.google.com/go/bigquery/iterator.go b/vendor/cloud.google.com/go/bigquery/iterator.go
index 9d177d1b8..942be4205 100644
--- a/vendor/cloud.google.com/go/bigquery/iterator.go
+++ b/vendor/cloud.google.com/go/bigquery/iterator.go
@@ -140,8 +140,12 @@ type pageFetcher func(ctx context.Context, _ *rowSource, _ Schema, startIndex ui
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#numeric-type
// for more on NUMERIC.
//
-// A repeated field corresponds to a slice or array of the element type. A STRUCT
-// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
+// A repeated field corresponds to a slice or array of the element type. BigQuery translates
+// NULL arrays into an empty array, so we follow that behavior.
+// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#array_nulls
+// for more about NULL and empty arrays.
+//
+// A STRUCT type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field,
diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go
index d1d7f0631..ba69e647f 100644
--- a/vendor/cloud.google.com/go/bigquery/job.go
+++ b/vendor/cloud.google.com/go/bigquery/job.go
@@ -356,8 +356,8 @@ func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, uint6
call = call.FormatOptionsUseInt64Timestamp(true)
setClientHeader(call.Header())
backoff := gax.Backoff{
- Initial: 1 * time.Second,
- Multiplier: 2,
+ Initial: 50 * time.Millisecond,
+ Multiplier: 1.3,
Max: 60 * time.Second,
}
var res *bq.GetQueryResultsResponse
diff --git a/vendor/cloud.google.com/go/bigquery/load.go b/vendor/cloud.google.com/go/bigquery/load.go
index 3693719c4..d5c53002b 100644
--- a/vendor/cloud.google.com/go/bigquery/load.go
+++ b/vendor/cloud.google.com/go/bigquery/load.go
@@ -105,6 +105,11 @@ type LoadConfig struct {
// MediaOptions stores options for customizing media upload.
MediaOptions []googleapi.MediaOption
+
+ // Controls the behavior of column naming during a load job.
+ // For more information, see:
+ // https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#columnnamecharactermap
+ ColumnNameCharacterMap ColumnNameCharacterMap
}
func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
@@ -124,6 +129,7 @@ func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
HivePartitioningOptions: l.HivePartitioningOptions.toBQ(),
ReferenceFileSchemaUri: l.ReferenceFileSchemaURI,
CreateSession: l.CreateSession,
+ ColumnNameCharacterMap: string(l.ColumnNameCharacterMap),
},
JobTimeoutMs: l.JobTimeout.Milliseconds(),
}
@@ -153,6 +159,7 @@ func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
HivePartitioningOptions: bqToHivePartitioningOptions(q.Load.HivePartitioningOptions),
ReferenceFileSchemaURI: q.Load.ReferenceFileSchemaUri,
CreateSession: q.Load.CreateSession,
+ ColumnNameCharacterMap: ColumnNameCharacterMap(q.Load.ColumnNameCharacterMap),
}
if q.JobTimeoutMs > 0 {
lc.JobTimeout = time.Duration(q.JobTimeoutMs) * time.Millisecond
@@ -238,3 +245,24 @@ var (
// StringTargetType indicates the preferred type is STRING when supported.
StringTargetType DecimalTargetType = "STRING"
)
+
+// ColumnNameCharacterMap is used to specify column naming behavior for load jobs.
+type ColumnNameCharacterMap string
+
+var (
+
+ // UnspecifiedColumnNameCharacterMap is the unspecified default value.
+ UnspecifiedColumnNameCharacterMap ColumnNameCharacterMap = "COLUMN_NAME_CHARACTER_MAP_UNSPECIFIED"
+
+ // StrictColumnNameCharacterMap indicates support for flexible column names.
+ // Invalid column names will be rejected.
+ StrictColumnNameCharacterMap ColumnNameCharacterMap = "STRICT"
+
+ // V1ColumnNameCharacterMap indicates support for alphanumeric + underscore characters and names must start with a letter or underscore.
+ // Invalid column names will be normalized.
+ V1ColumnNameCharacterMap ColumnNameCharacterMap = "V1"
+
+ // V2ColumnNameCharacterMap indicates support for flexible column names.
+ // Invalid column names will be normalized.
+ V2ColumnNameCharacterMap ColumnNameCharacterMap = "V2"
+)
diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go
index cf47e9a23..e256a7c3a 100644
--- a/vendor/cloud.google.com/go/bigquery/params.go
+++ b/vendor/cloud.google.com/go/bigquery/params.go
@@ -40,7 +40,10 @@ var (
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)
-const nullableTagOption = "nullable"
+const (
+ nullableTagOption = "nullable"
+ jsonTagOption = "json"
+)
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
@@ -51,10 +54,10 @@ func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}
return "", false, nil, invalidFieldNameError(name)
}
for _, opt := range opts {
- if opt != nullableTagOption {
+ if opt != nullableTagOption && opt != jsonTagOption {
return "", false, nil, fmt.Errorf(
- "bigquery: invalid tag option %q. The only valid option is %q",
- opt, nullableTagOption)
+ "bigquery: invalid tag option %q. The only valid options are %q and %q",
+ opt, nullableTagOption, jsonTagOption)
}
}
return name, keep, opts, nil
@@ -83,6 +86,7 @@ var (
geographyParamType = &bq.QueryParameterType{Type: "GEOGRAPHY"}
intervalParamType = &bq.QueryParameterType{Type: "INTERVAL"}
jsonParamType = &bq.QueryParameterType{Type: "JSON"}
+ rangeParamType = &bq.QueryParameterType{Type: "RANGE"}
)
var (
@@ -92,6 +96,7 @@ var (
typeOfGoTime = reflect.TypeOf(time.Time{})
typeOfRat = reflect.TypeOf(&big.Rat{})
typeOfIntervalValue = reflect.TypeOf(&IntervalValue{})
+ typeOfRangeValue = reflect.TypeOf(&RangeValue{})
typeOfQueryParameterValue = reflect.TypeOf(&QueryParameterValue{})
)
@@ -312,9 +317,11 @@ func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
}, nil
}
+var errNilParam = fmt.Errorf("bigquery: nil parameter")
+
func paramType(t reflect.Type, v reflect.Value) (*bq.QueryParameterType, error) {
if t == nil {
- return nil, errors.New("bigquery: nil parameter")
+ return nil, errNilParam
}
switch t {
case typeOfDate, typeOfNullDate:
@@ -341,6 +348,25 @@ func paramType(t reflect.Type, v reflect.Value) (*bq.QueryParameterType, error)
return geographyParamType, nil
case typeOfNullJSON:
return jsonParamType, nil
+ case typeOfRangeValue:
+ iv := v.Interface().(*RangeValue)
+	// In order to autodetect a Range param correctly, at least one of start, end must be populated.
+	// Without it, users must declare typing by using QueryParameterValue.
+ element := iv.Start
+ if element == nil {
+ element = iv.End
+ }
+ if element == nil {
+ return nil, fmt.Errorf("unable to determine range element type from RangeValue without a non-nil start or end value")
+ }
+ elet, err := paramType(reflect.TypeOf(element), reflect.ValueOf(element))
+ if err != nil {
+ return nil, err
+ }
+ return &bq.QueryParameterType{
+ Type: "RANGE",
+ RangeElementType: elet,
+ }, nil
case typeOfQueryParameterValue:
return v.Interface().(*QueryParameterValue).toBQParamType(), nil
}
@@ -407,7 +433,7 @@ func paramType(t reflect.Type, v reflect.Value) (*bq.QueryParameterType, error)
func paramValue(v reflect.Value) (*bq.QueryParameterValue, error) {
res := &bq.QueryParameterValue{}
if !v.IsValid() {
- return res, errors.New("bigquery: nil parameter")
+ return res, errNilParam
}
t := v.Type()
switch t {
@@ -489,6 +515,28 @@ func paramValue(v reflect.Value) (*bq.QueryParameterValue, error) {
case typeOfIntervalValue:
res.Value = IntervalString(v.Interface().(*IntervalValue))
return res, nil
+ case typeOfRangeValue:
+ // RangeValue is a compound type, and we must process the start/end to
+ // fully populate the value.
+ res.RangeValue = &bq.RangeValue{}
+ iv := v.Interface().(*RangeValue)
+ sVal, err := paramValue(reflect.ValueOf(iv.Start))
+ if err != nil {
+ if !errors.Is(err, errNilParam) {
+ return nil, err
+ }
+ } else {
+ res.RangeValue.Start = sVal
+ }
+ eVal, err := paramValue(reflect.ValueOf(iv.End))
+ if err != nil {
+ if !errors.Is(err, errNilParam) {
+ return nil, err
+ }
+ } else {
+ res.RangeValue.End = eVal
+ }
+ return res, nil
case typeOfQueryParameterValue:
return v.Interface().(*QueryParameterValue).toBQParamValue()
}
@@ -589,6 +637,26 @@ func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterTyp
return map[string]interface{}(nil), nil
}
return convertParamStruct(qval.StructValues, qtype.StructTypes)
+ case "RANGE":
+ rv := &RangeValue{}
+ if qval.RangeValue == nil {
+ return rv, nil
+ }
+ if qval.RangeValue.Start != nil {
+ startVal, err := convertParamValue(qval.RangeValue.Start, qtype.RangeElementType)
+ if err != nil {
+ return nil, err
+ }
+ rv.Start = startVal
+ }
+ if qval.RangeValue.End != nil {
+ endVal, err := convertParamValue(qval.RangeValue.End, qtype.RangeElementType)
+ if err != nil {
+ return nil, err
+ }
+ rv.End = endVal
+ }
+ return rv, nil
case "TIMESTAMP":
if isNullScalar(qval) {
return NullTimestamp{Valid: false}, nil
diff --git a/vendor/cloud.google.com/go/bigquery/rangevalue.go b/vendor/cloud.google.com/go/bigquery/rangevalue.go
new file mode 100644
index 000000000..63010ce7e
--- /dev/null
+++ b/vendor/cloud.google.com/go/bigquery/rangevalue.go
@@ -0,0 +1,28 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bigquery
+
+// RangeValue represents a continuous RANGE of values of a given element
+// type. The supported element types for RANGE are currently the BigQuery
+// DATE, DATETIME, and TIMESTAMP types.
+type RangeValue struct {
+ // The start value of the range. A missing value represents an
+ // unbounded start.
+ Start Value `json:"start"`
+
+ // The end value of the range. A missing value represents an
+ // unbounded end.
+ End Value `json:"end"`
+}
diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go
index 17d4ab71d..40b939507 100644
--- a/vendor/cloud.google.com/go/bigquery/schema.go
+++ b/vendor/cloud.google.com/go/bigquery/schema.go
@@ -152,6 +152,11 @@ type FieldSchema struct {
// Information about the range.
// If the type is RANGE, this field is required.
RangeElementType *RangeElementType
+
+ // RoundingMode specifies the rounding mode to be used when storing
+ // values of NUMERIC and BIGNUMERIC type.
+ // If unspecified, default value is RoundHalfAwayFromZero.
+ RoundingMode RoundingMode
}
func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
@@ -166,6 +171,7 @@ func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
DefaultValueExpression: fs.DefaultValueExpression,
Collation: string(fs.Collation),
RangeElementType: fs.RangeElementType.toBQ(),
+ RoundingMode: string(fs.RoundingMode),
}
if fs.Repeated {
@@ -253,6 +259,7 @@ func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
DefaultValueExpression: tfs.DefaultValueExpression,
Collation: tfs.Collation,
RangeElementType: bqToRangeElementType(tfs.RangeElementType),
+ RoundingMode: RoundingMode(tfs.RoundingMode),
}
for _, f := range tfs.Fields {
@@ -363,6 +370,7 @@ var typeOfByteSlice = reflect.TypeOf([]byte{})
// TIME civil.Time
// DATETIME civil.DateTime
// NUMERIC *big.Rat
+// JSON map[string]interface{}
//
// The big.Rat type supports numbers of arbitrary size and precision. Values
// will be rounded to 9 digits after the decimal point before being transmitted
@@ -375,6 +383,15 @@ var typeOfByteSlice = reflect.TypeOf([]byte{})
// Due to lack of unique native Go type for GEOGRAPHY, there is no schema
// inference to GEOGRAPHY at this time.
//
+// This package also provides some value types for expressing the corresponding SQL types.
+//
+// INTERVAL *IntervalValue
+// RANGE *RangeValue
+//
+// In the case of RANGE types, a RANGE represents a continuous set of values of a given
+// element type (DATE, DATETIME, or TIMESTAMP). InferSchema does not attempt to determine
+// the element type, as it uses generic Value types to denote the start/end of the range.
+//
// Nullable fields are inferred from the NullXXX types, declared in this package:
//
// STRING NullString
@@ -421,6 +438,21 @@ func InferSchema(st interface{}) (Schema, error) {
return inferSchemaReflectCached(reflect.TypeOf(st))
}
+// RoundingMode represents the rounding mode to be used when storing
+// values of NUMERIC and BIGNUMERIC type.
+type RoundingMode string
+
+const (
+ // RoundHalfAwayFromZero rounds half values away from zero when applying
+ // precision and scale upon writing of NUMERIC and BIGNUMERIC values.
+ // For Scale: 0 1.1, 1.2, 1.3, 1.4 => 1 1.5, 1.6, 1.7, 1.8, 1.9 => 2
+ RoundHalfAwayFromZero RoundingMode = "ROUND_HALF_AWAY_FROM_ZERO"
+ // RoundHalfEven rounds half values to the nearest even value when applying
+ // precision and scale upon writing of NUMERIC and BIGNUMERIC values.
+ // For Scale: 0 1.1, 1.2, 1.3, 1.4 => 1 1.5 => 2 1.6, 1.7, 1.8, 1.9 => 2 2.5 => 2
+ RoundHalfEven RoundingMode = "ROUND_HALF_EVEN"
+)
+
var schemaCache sync.Map
type cacheVal struct {
@@ -469,11 +501,15 @@ func inferStruct(t reflect.Type) (Schema, error) {
}
// inferFieldSchema infers the FieldSchema for a Go type
-func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldSchema, error) {
+func inferFieldSchema(fieldName string, rt reflect.Type, nullable, json bool) (*FieldSchema, error) {
// Only []byte and struct pointers can be tagged nullable.
if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
return nil, badNullableError{fieldName, rt}
}
+ // Only structs and struct pointers can be tagged as json.
+ if json && !(rt.Kind() == reflect.Struct || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
+ return nil, badJSONError{fieldName, rt}
+ }
switch rt {
case typeOfByteSlice:
return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
@@ -491,6 +527,12 @@ func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldS
// larger precision of BIGNUMERIC need to manipulate the inferred
// schema.
return &FieldSchema{Required: !nullable, Type: NumericFieldType}, nil
+ case typeOfIntervalValue:
+ return &FieldSchema{Required: !nullable, Type: IntervalFieldType}, nil
+ case typeOfRangeValue:
+		// We can't fully infer the element type of a range without additional
+		// information, so RangeElementType is left unset on inferred schemas.
+ return &FieldSchema{Required: !nullable, Type: RangeFieldType}, nil
}
if ft := nullableFieldType(rt); ft != "" {
return &FieldSchema{Required: false, Type: ft}, nil
@@ -509,7 +551,7 @@ func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldS
// Repeated nullable types are not supported by BigQuery.
return nil, unsupportedFieldTypeError{fieldName, rt}
}
- f, err := inferFieldSchema(fieldName, et, false)
+ f, err := inferFieldSchema(fieldName, et, false, false)
if err != nil {
return nil, err
}
@@ -522,6 +564,10 @@ func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldS
}
fallthrough
case reflect.Struct:
+ if json {
+ return &FieldSchema{Required: !nullable, Type: JSONFieldType}, nil
+ }
+
nested, err := inferStruct(rt)
if err != nil {
return nil, err
@@ -533,6 +579,11 @@ func inferFieldSchema(fieldName string, rt reflect.Type, nullable bool) (*FieldS
return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
case reflect.Float32, reflect.Float64:
return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
+ case reflect.Map:
+ if rt.Key().Kind() != reflect.String {
+ return nil, unsupportedFieldTypeError{fieldName, rt}
+ }
+ return &FieldSchema{Required: !nullable, Type: JSONFieldType}, nil
default:
return nil, unsupportedFieldTypeError{fieldName, rt}
}
@@ -546,14 +597,16 @@ func inferFields(rt reflect.Type) (Schema, error) {
return nil, err
}
for _, field := range fields {
- var nullable bool
+ var nullable, json bool
for _, opt := range field.ParsedTag.([]string) {
if opt == nullableTagOption {
nullable = true
- break
+ }
+ if opt == jsonTagOption {
+ json = true
}
}
- f, err := inferFieldSchema(field.Name, field.Type, nullable)
+ f, err := inferFieldSchema(field.Name, field.Type, nullable, json)
if err != nil {
return nil, err
}
@@ -694,6 +747,15 @@ func (e badNullableError) Error() string {
return fmt.Sprintf(`bigquery: field %q of type %s: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`, e.name, e.typ)
}
+type badJSONError struct {
+ name string
+ typ reflect.Type
+}
+
+func (e badJSONError) Error() string {
+ return fmt.Sprintf(`bigquery: field %q of type %s: use "json" only for struct and struct pointers`, e.name, e.typ)
+}
+
type unsupportedFieldTypeError struct {
name string
typ reflect.Type
diff --git a/vendor/cloud.google.com/go/bigquery/standardsql.go b/vendor/cloud.google.com/go/bigquery/standardsql.go
index 7f8ca6e11..da24f4262 100644
--- a/vendor/cloud.google.com/go/bigquery/standardsql.go
+++ b/vendor/cloud.google.com/go/bigquery/standardsql.go
@@ -26,6 +26,8 @@ type StandardSQLDataType struct {
// ArrayElementType indicates the type of an array's elements, when the
// TypeKind is ARRAY.
ArrayElementType *StandardSQLDataType
+ // The type of the range's elements, if TypeKind is RANGE.
+ RangeElementType *StandardSQLDataType
// StructType indicates the struct definition (fields), when the
// TypeKind is STRUCT.
StructType *StandardSQLStructType
@@ -60,6 +62,13 @@ func (ssdt *StandardSQLDataType) toBQ() (*bq.StandardSqlDataType, error) {
}
bqdt.StructType = dt
}
+ if ssdt.RangeElementType != nil {
+ dt, err := ssdt.RangeElementType.toBQ()
+ if err != nil {
+ return nil, err
+ }
+ bqdt.RangeElementType = dt
+ }
return bqdt, nil
}
@@ -77,6 +86,14 @@ func (ssdt StandardSQLDataType) toBQParamType() *bq.QueryParameterType {
}
return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}
}
+ if ssdt.RangeElementType != nil {
+ return &bq.QueryParameterType{
+ Type: string(RangeFieldType),
+ RangeElementType: &bq.QueryParameterType{
+ Type: ssdt.RangeElementType.TypeKind,
+ },
+ }
+ }
return &bq.QueryParameterType{Type: ssdt.TypeKind}
}
@@ -102,6 +119,13 @@ func bqToStandardSQLDataType(bqdt *bq.StandardSqlDataType) (*StandardSQLDataType
}
ssdt.StructType = st
}
+ if bqdt.RangeElementType != nil {
+ st, err := bqToStandardSQLDataType(bqdt.RangeElementType)
+ if err != nil {
+ return nil, err
+ }
+ ssdt.RangeElementType = st
+ }
return ssdt, nil
}
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_read_client.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_read_client.go
index dd7ffecc4..89a066bbd 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_read_client.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_read_client.go
@@ -259,7 +259,9 @@ func (c *bigQueryReadGRPCClient) Connection() *grpc.ClientConn {
func (c *bigQueryReadGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go
index cdacb42f6..1dd51e6e7 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/big_query_write_client.go
@@ -343,7 +343,9 @@ func (c *bigQueryWriteGRPCClient) Connection() *grpc.ClientConn {
func (c *bigQueryWriteGRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/annotations.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/annotations.pb.go
index 58b4a66a5..0e295f086 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/annotations.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/annotations.pb.go
@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/annotations.proto
package storagepb
@@ -81,7 +81,7 @@ var file_google_cloud_bigquery_storage_v1_annotations_proto_rawDesc = []byte{
0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-var file_google_cloud_bigquery_storage_v1_annotations_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_annotations_proto_goTypes = []any{
(*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
}
var file_google_cloud_bigquery_storage_v1_annotations_proto_depIdxs = []int32{
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/arrow.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/arrow.pb.go
index d60ddef96..e9cd4cc7f 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/arrow.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/arrow.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/arrow.proto
package storagepb
@@ -316,7 +316,7 @@ func file_google_cloud_bigquery_storage_v1_arrow_proto_rawDescGZIP() []byte {
var file_google_cloud_bigquery_storage_v1_arrow_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_google_cloud_bigquery_storage_v1_arrow_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_arrow_proto_goTypes = []any{
(ArrowSerializationOptions_CompressionCodec)(0), // 0: google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec
(*ArrowSchema)(nil), // 1: google.cloud.bigquery.storage.v1.ArrowSchema
(*ArrowRecordBatch)(nil), // 2: google.cloud.bigquery.storage.v1.ArrowRecordBatch
@@ -337,7 +337,7 @@ func file_google_cloud_bigquery_storage_v1_arrow_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ArrowSchema); i {
case 0:
return &v.state
@@ -349,7 +349,7 @@ func file_google_cloud_bigquery_storage_v1_arrow_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ArrowRecordBatch); i {
case 0:
return &v.state
@@ -361,7 +361,7 @@ func file_google_cloud_bigquery_storage_v1_arrow_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_arrow_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ArrowSerializationOptions); i {
case 0:
return &v.state
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/avro.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/avro.pb.go
index e712e1c10..3e0c48d91 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/avro.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/avro.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/avro.proto
package storagepb
@@ -255,7 +255,7 @@ func file_google_cloud_bigquery_storage_v1_avro_proto_rawDescGZIP() []byte {
}
var file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_google_cloud_bigquery_storage_v1_avro_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_avro_proto_goTypes = []any{
(*AvroSchema)(nil), // 0: google.cloud.bigquery.storage.v1.AvroSchema
(*AvroRows)(nil), // 1: google.cloud.bigquery.storage.v1.AvroRows
(*AvroSerializationOptions)(nil), // 2: google.cloud.bigquery.storage.v1.AvroSerializationOptions
@@ -274,7 +274,7 @@ func file_google_cloud_bigquery_storage_v1_avro_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*AvroSchema); i {
case 0:
return &v.state
@@ -286,7 +286,7 @@ func file_google_cloud_bigquery_storage_v1_avro_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*AvroRows); i {
case 0:
return &v.state
@@ -298,7 +298,7 @@ func file_google_cloud_bigquery_storage_v1_avro_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_avro_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*AvroSerializationOptions); i {
case 0:
return &v.state
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/protobuf.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/protobuf.pb.go
index e1286ff3c..1af704532 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/protobuf.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/protobuf.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/protobuf.proto
package storagepb
@@ -191,7 +191,7 @@ func file_google_cloud_bigquery_storage_v1_protobuf_proto_rawDescGZIP() []byte {
}
var file_google_cloud_bigquery_storage_v1_protobuf_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_google_cloud_bigquery_storage_v1_protobuf_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_protobuf_proto_goTypes = []any{
(*ProtoSchema)(nil), // 0: google.cloud.bigquery.storage.v1.ProtoSchema
(*ProtoRows)(nil), // 1: google.cloud.bigquery.storage.v1.ProtoRows
(*descriptorpb.DescriptorProto)(nil), // 2: google.protobuf.DescriptorProto
@@ -211,7 +211,7 @@ func file_google_cloud_bigquery_storage_v1_protobuf_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_cloud_bigquery_storage_v1_protobuf_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_protobuf_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ProtoSchema); i {
case 0:
return &v.state
@@ -223,7 +223,7 @@ func file_google_cloud_bigquery_storage_v1_protobuf_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_protobuf_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_protobuf_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ProtoRows); i {
case 0:
return &v.state
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go
index 3b5d82246..f86e638eb 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/storage.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/storage.proto
package storagepb
@@ -2439,7 +2439,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_rawDescGZIP() []byte {
var file_google_cloud_bigquery_storage_v1_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
-var file_google_cloud_bigquery_storage_v1_storage_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_storage_proto_goTypes = []any{
(AppendRowsRequest_MissingValueInterpretation)(0), // 0: google.cloud.bigquery.storage.v1.AppendRowsRequest.MissingValueInterpretation
(StorageError_StorageErrorCode)(0), // 1: google.cloud.bigquery.storage.v1.StorageError.StorageErrorCode
(RowError_RowErrorCode)(0), // 2: google.cloud.bigquery.storage.v1.RowError.RowErrorCode
@@ -2547,7 +2547,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
file_google_cloud_bigquery_storage_v1_stream_proto_init()
file_google_cloud_bigquery_storage_v1_table_proto_init()
if !protoimpl.UnsafeEnabled {
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*CreateReadSessionRequest); i {
case 0:
return &v.state
@@ -2559,7 +2559,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ReadRowsRequest); i {
case 0:
return &v.state
@@ -2571,7 +2571,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ThrottleState); i {
case 0:
return &v.state
@@ -2583,7 +2583,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*StreamStats); i {
case 0:
return &v.state
@@ -2595,7 +2595,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*ReadRowsResponse); i {
case 0:
return &v.state
@@ -2607,7 +2607,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*SplitReadStreamRequest); i {
case 0:
return &v.state
@@ -2619,7 +2619,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SplitReadStreamResponse); i {
case 0:
return &v.state
@@ -2631,7 +2631,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*CreateWriteStreamRequest); i {
case 0:
return &v.state
@@ -2643,7 +2643,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*AppendRowsRequest); i {
case 0:
return &v.state
@@ -2655,7 +2655,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*AppendRowsResponse); i {
case 0:
return &v.state
@@ -2667,7 +2667,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*GetWriteStreamRequest); i {
case 0:
return &v.state
@@ -2679,7 +2679,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*BatchCommitWriteStreamsRequest); i {
case 0:
return &v.state
@@ -2691,7 +2691,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*BatchCommitWriteStreamsResponse); i {
case 0:
return &v.state
@@ -2703,7 +2703,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*FinalizeWriteStreamRequest); i {
case 0:
return &v.state
@@ -2715,7 +2715,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*FinalizeWriteStreamResponse); i {
case 0:
return &v.state
@@ -2727,7 +2727,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*FlushRowsRequest); i {
case 0:
return &v.state
@@ -2739,7 +2739,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*FlushRowsResponse); i {
case 0:
return &v.state
@@ -2751,7 +2751,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*StorageError); i {
case 0:
return &v.state
@@ -2763,7 +2763,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*RowError); i {
case 0:
return &v.state
@@ -2775,7 +2775,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*StreamStats_Progress); i {
case 0:
return &v.state
@@ -2787,7 +2787,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*AppendRowsRequest_ProtoData); i {
case 0:
return &v.state
@@ -2799,7 +2799,7 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*AppendRowsResponse_AppendResult); i {
case 0:
return &v.state
@@ -2812,16 +2812,16 @@ func file_google_cloud_bigquery_storage_v1_storage_proto_init() {
}
}
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[4].OneofWrappers = []any{
(*ReadRowsResponse_AvroRows)(nil),
(*ReadRowsResponse_ArrowRecordBatch)(nil),
(*ReadRowsResponse_AvroSchema)(nil),
(*ReadRowsResponse_ArrowSchema)(nil),
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[8].OneofWrappers = []interface{}{
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[8].OneofWrappers = []any{
(*AppendRowsRequest_ProtoRows)(nil),
}
- file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[9].OneofWrappers = []interface{}{
+ file_google_cloud_bigquery_storage_v1_storage_proto_msgTypes[9].OneofWrappers = []any{
(*AppendRowsResponse_AppendResult_)(nil),
(*AppendRowsResponse_Error)(nil),
}
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go
index 682979f03..5e92d1a0d 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/stream.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/stream.proto
package storagepb
@@ -1161,7 +1161,7 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_rawDescGZIP() []byte {
var file_google_cloud_bigquery_storage_v1_stream_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
var file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
-var file_google_cloud_bigquery_storage_v1_stream_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_stream_proto_goTypes = []any{
(DataFormat)(0), // 0: google.cloud.bigquery.storage.v1.DataFormat
(WriteStreamView)(0), // 1: google.cloud.bigquery.storage.v1.WriteStreamView
(ReadSession_TableReadOptions_ResponseCompressionCodec)(0), // 2: google.cloud.bigquery.storage.v1.ReadSession.TableReadOptions.ResponseCompressionCodec
@@ -1212,7 +1212,7 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_init() {
file_google_cloud_bigquery_storage_v1_avro_proto_init()
file_google_cloud_bigquery_storage_v1_table_proto_init()
if !protoimpl.UnsafeEnabled {
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*ReadSession); i {
case 0:
return &v.state
@@ -1224,7 +1224,7 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ReadStream); i {
case 0:
return &v.state
@@ -1236,7 +1236,7 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*WriteStream); i {
case 0:
return &v.state
@@ -1248,7 +1248,7 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*ReadSession_TableModifiers); i {
case 0:
return &v.state
@@ -1260,7 +1260,7 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*ReadSession_TableReadOptions); i {
case 0:
return &v.state
@@ -1273,11 +1273,11 @@ func file_google_cloud_bigquery_storage_v1_stream_proto_init() {
}
}
}
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[0].OneofWrappers = []any{
(*ReadSession_AvroSchema)(nil),
(*ReadSession_ArrowSchema)(nil),
}
- file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_google_cloud_bigquery_storage_v1_stream_proto_msgTypes[4].OneofWrappers = []any{
(*ReadSession_TableReadOptions_ArrowSerializationOptions)(nil),
(*ReadSession_TableReadOptions_AvroSerializationOptions)(nil),
}
diff --git a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/table.pb.go b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/table.pb.go
index c7764f870..f26b67506 100644
--- a/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/table.pb.go
+++ b/vendor/cloud.google.com/go/bigquery/storage/apiv1/storagepb/table.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc v4.25.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
// source: google/cloud/bigquery/storage/v1/table.proto
package storagepb
@@ -586,7 +586,7 @@ func file_google_cloud_bigquery_storage_v1_table_proto_rawDescGZIP() []byte {
var file_google_cloud_bigquery_storage_v1_table_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_google_cloud_bigquery_storage_v1_table_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_google_cloud_bigquery_storage_v1_table_proto_goTypes = []interface{}{
+var file_google_cloud_bigquery_storage_v1_table_proto_goTypes = []any{
(TableFieldSchema_Type)(0), // 0: google.cloud.bigquery.storage.v1.TableFieldSchema.Type
(TableFieldSchema_Mode)(0), // 1: google.cloud.bigquery.storage.v1.TableFieldSchema.Mode
(*TableSchema)(nil), // 2: google.cloud.bigquery.storage.v1.TableSchema
@@ -613,7 +613,7 @@ func file_google_cloud_bigquery_storage_v1_table_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_google_cloud_bigquery_storage_v1_table_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_table_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*TableSchema); i {
case 0:
return &v.state
@@ -625,7 +625,7 @@ func file_google_cloud_bigquery_storage_v1_table_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_table_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_table_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*TableFieldSchema); i {
case 0:
return &v.state
@@ -637,7 +637,7 @@ func file_google_cloud_bigquery_storage_v1_table_proto_init() {
return nil
}
}
- file_google_cloud_bigquery_storage_v1_table_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_google_cloud_bigquery_storage_v1_table_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*TableFieldSchema_FieldElementType); i {
case 0:
return &v.state
diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go
index 34070d033..5c0165f73 100644
--- a/vendor/cloud.google.com/go/bigquery/value.go
+++ b/vendor/cloud.google.com/go/bigquery/value.go
@@ -82,6 +82,9 @@ func loadMap(m map[string]Value, vals []Value, s Schema) {
}
v = vs
}
+ if f.Repeated && (v == nil || reflect.ValueOf(v).IsNil()) {
+ v = []Value{}
+ }
m[f.Name] = v
}
@@ -433,7 +436,16 @@ func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
return setNull(v, x, func() interface{} { return x.(*big.Rat) })
}
}
+
+ case RangeFieldType:
+ if ftype == typeOfRangeValue {
+ return func(v reflect.Value, x interface{}) error {
+ return setNull(v, x, func() interface{} { return x.(*RangeValue) })
+ }
+ }
+
}
+
return nil
}
@@ -765,6 +777,8 @@ func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
return formatUploadValue(v, fs, func(v reflect.Value) string {
return IntervalString(v.Interface().(*IntervalValue))
})
+ case RangeFieldType:
+ return v.Interface()
default:
if !fs.Repeated || v.Len() > 0 {
return v.Interface()
@@ -879,7 +893,17 @@ func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
var values []Value
for i, cell := range r.F {
fs := schema[i]
- v, err := convertValue(cell.V, fs.Type, fs.Schema)
+ var v Value
+ var err error
+ if fs.Type == RangeFieldType {
+ // interception range conversion here, as we don't propagate range element type more deeply.
+ if fs.RangeElementType == nil {
+ return nil, errors.New("bigquery: incomplete range schema for conversion")
+ }
+ v, err = convertRangeValue(cell.V.(string), fs.RangeElementType.Type)
+ } else {
+ v, err = convertValue(cell.V, fs.Type, fs.Schema)
+ }
if err != nil {
return nil, err
}
@@ -991,3 +1015,46 @@ func convertBasicType(val string, typ FieldType) (Value, error) {
return nil, fmt.Errorf("unrecognized type: %s", typ)
}
}
+
+// how BQ declares an unbounded RANGE.
+var unboundedRangeSentinel = "UNBOUNDED"
+
+// convertRangeValue aids in parsing the compound RANGE api data representation.
+// The format for a range value is: "[startval, endval)"
+func convertRangeValue(val string, elementType FieldType) (Value, error) {
+ supported := false
+ for _, t := range []FieldType{DateFieldType, DateTimeFieldType, TimestampFieldType} {
+ if elementType == t {
+ supported = true
+ break
+ }
+ }
+ if !supported {
+ return nil, fmt.Errorf("bigquery: invalid RANGE element type %q", elementType)
+ }
+ if !strings.HasPrefix(val, "[") || !strings.HasSuffix(val, ")") {
+ return nil, fmt.Errorf("bigquery: invalid RANGE value %q", val)
+ }
+ // trim the leading/trailing characters
+ val = val[1 : len(val)-1]
+ parts := strings.Split(val, ", ")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("bigquery: invalid RANGE value %q", val)
+ }
+ rv := &RangeValue{}
+ if parts[0] != unboundedRangeSentinel {
+ sv, err := convertBasicType(parts[0], elementType)
+ if err != nil {
+ return nil, fmt.Errorf("bigquery: invalid RANGE start value %q", parts[0])
+ }
+ rv.Start = sv
+ }
+ if parts[1] != unboundedRangeSentinel {
+ ev, err := convertBasicType(parts[1], elementType)
+ if err != nil {
+ return nil, fmt.Errorf("bigquery: invalid RANGE end value %q", parts[1])
+ }
+ rv.End = ev
+ }
+ return rv, nil
+}