aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/cloud.google.com
diff options
context:
space:
mode:
authorAleksandr Nogikh <nogikh@google.com>2025-01-02 11:58:29 +0100
committerAleksandr Nogikh <nogikh@google.com>2025-01-22 13:17:53 +0000
commit7512e6e7738143bd302d9b20cb1fd0d1d7af9643 (patch)
tree67988d580d111bacbd009acfc0057f89aafa6522 /vendor/cloud.google.com
parent44f2ad31190603135f4ac758273f26111ca6003c (diff)
vendor: fetch the dependencies
Diffstat (limited to 'vendor/cloud.google.com')
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go636
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go61
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go120
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go4194
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go2446
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go1080
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go567
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go4807
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go128
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go39
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go49
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go23
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go664
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go125
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go39
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go3660
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go279
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go6928
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go61
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go23
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/fuzz.go29
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/keywords.go322
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/parser.go4696
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/sql.go1183
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/types.go1394
25 files changed, 33553 insertions, 0 deletions
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go
new file mode 100644
index 000000000..1c8385f86
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go
@@ -0,0 +1,636 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package database
+
+import (
+ "context"
+ "time"
+
+ "cloud.google.com/go/longrunning"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ databasepb "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+)
+
+// CopyBackupOperation manages a long-running operation from CopyBackup.
+type CopyBackupOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CopyBackupOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Backup
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CopyBackupOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Backup
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CopyBackupOperation) Metadata() (*databasepb.CopyBackupMetadata, error) {
+ var meta databasepb.CopyBackupMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CopyBackupOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CopyBackupOperation) Name() string {
+ return op.lro.Name()
+}
+
+// CreateBackupOperation manages a long-running operation from CreateBackup.
+type CreateBackupOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateBackupOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Backup
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateBackupOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Backup
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateBackupOperation) Metadata() (*databasepb.CreateBackupMetadata, error) {
+ var meta databasepb.CreateBackupMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateBackupOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateBackupOperation) Name() string {
+ return op.lro.Name()
+}
+
+// CreateDatabaseOperation manages a long-running operation from CreateDatabase.
+type CreateDatabaseOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Database
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Database
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateDatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) {
+ var meta databasepb.CreateDatabaseMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateDatabaseOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateDatabaseOperation) Name() string {
+ return op.lro.Name()
+}
+
+// RestoreDatabaseOperation manages a long-running operation from RestoreDatabase.
+type RestoreDatabaseOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *RestoreDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Database
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *RestoreDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Database
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *RestoreDatabaseOperation) Metadata() (*databasepb.RestoreDatabaseMetadata, error) {
+ var meta databasepb.RestoreDatabaseMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *RestoreDatabaseOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *RestoreDatabaseOperation) Name() string {
+ return op.lro.Name()
+}
+
+// UpdateDatabaseDdlOperation manages a long-running operation from UpdateDatabaseDdl.
+type UpdateDatabaseDdlOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ return op.lro.WaitWithInterval(ctx, nil, time.Minute, opts...)
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *UpdateDatabaseDdlOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ return op.lro.Poll(ctx, nil, opts...)
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *UpdateDatabaseDdlOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) {
+ var meta databasepb.UpdateDatabaseDdlMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *UpdateDatabaseDdlOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *UpdateDatabaseDdlOperation) Name() string {
+ return op.lro.Name()
+}
+
+// UpdateDatabaseOperation manages a long-running operation from UpdateDatabase.
+type UpdateDatabaseOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *UpdateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Database
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *UpdateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp databasepb.Database
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *UpdateDatabaseOperation) Metadata() (*databasepb.UpdateDatabaseMetadata, error) {
+ var meta databasepb.UpdateDatabaseMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *UpdateDatabaseOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *UpdateDatabaseOperation) Name() string {
+ return op.lro.Name()
+}
+
+// BackupIterator manages a stream of *databasepb.Backup.
+type BackupIterator struct {
+ items []*databasepb.Backup
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Backup, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *BackupIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *BackupIterator) Next() (*databasepb.Backup, error) {
+ var item *databasepb.Backup
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *BackupIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *BackupIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// BackupScheduleIterator manages a stream of *databasepb.BackupSchedule.
+type BackupScheduleIterator struct {
+ items []*databasepb.BackupSchedule
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*databasepb.BackupSchedule, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *BackupScheduleIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *BackupScheduleIterator) Next() (*databasepb.BackupSchedule, error) {
+ var item *databasepb.BackupSchedule
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *BackupScheduleIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *BackupScheduleIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// DatabaseIterator manages a stream of *databasepb.Database.
+type DatabaseIterator struct {
+ items []*databasepb.Database
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *DatabaseIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *DatabaseIterator) Next() (*databasepb.Database, error) {
+ var item *databasepb.Database
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *DatabaseIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *DatabaseIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// DatabaseRoleIterator manages a stream of *databasepb.DatabaseRole.
+type DatabaseRoleIterator struct {
+ items []*databasepb.DatabaseRole
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*databasepb.DatabaseRole, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *DatabaseRoleIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *DatabaseRoleIterator) Next() (*databasepb.DatabaseRole, error) {
+ var item *databasepb.DatabaseRole
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *DatabaseRoleIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *DatabaseRoleIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// OperationIterator manages a stream of *longrunningpb.Operation.
+type OperationIterator struct {
+ items []*longrunningpb.Operation
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *OperationIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
+ var item *longrunningpb.Operation
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *OperationIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *OperationIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go
new file mode 100644
index 000000000..648c7f88e
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package database
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "time"
+
+ "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
+ "github.com/googleapis/gax-go/v2"
+ pbt "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+var (
+ validDBPattern = regexp.MustCompile("^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/databases/(?P<database>[^/]+)$")
+)
+
+// StartBackupOperation creates a backup of the given database. It will be stored
+// as projects/<project>/instances/<instance>/backups/<backupID>. The
+// backup will be automatically deleted by Cloud Spanner after its expiration.
+//
+// backupID must be unique across an instance.
+//
+// expireTime is the time the backup will expire. It is respected to
+// microsecond granularity.
+//
+// databasePath must have the form
+// projects/<project>/instances/<instance>/databases/<database>.
+func (c *DatabaseAdminClient) StartBackupOperation(ctx context.Context, backupID string, databasePath string, expireTime time.Time, opts ...gax.CallOption) (*CreateBackupOperation, error) {
+ m := validDBPattern.FindStringSubmatch(databasePath)
+ if m == nil {
+ return nil, fmt.Errorf("database name %q should conform to pattern %q",
+ databasePath, validDBPattern)
+ }
+ ts := &pbt.Timestamp{Seconds: expireTime.Unix(), Nanos: int32(expireTime.Nanosecond())}
+ // Create request from parameters.
+ req := &databasepb.CreateBackupRequest{
+ Parent: fmt.Sprintf("projects/%s/instances/%s", m[1], m[2]),
+ BackupId: backupID,
+ Backup: &databasepb.Backup{
+ Database: databasePath,
+ ExpireTime: ts,
+ },
+ }
+ return c.CreateBackup(ctx, req, opts...)
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go
new file mode 100644
index 000000000..f68a4cb65
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go
@@ -0,0 +1,120 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+ "time"
+ "unicode"
+
+ "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// retryer decides which CreateDatabase errors are retryable
+// (DeadlineExceeded and Unavailable). The backoff is effectively constant at
+// one millisecond (Initial == Max, Multiplier 1.0), so retries are driven by
+// the loop below rather than by exponential delay.
+var retryer = gax.OnCodes(
+ []codes.Code{codes.DeadlineExceeded, codes.Unavailable},
+ gax.Backoff{Initial: time.Millisecond, Max: time.Millisecond, Multiplier: 1.0},
+)
+
+// CreateDatabaseWithRetry creates a new database and retries the call if the
+// backend returns a retryable error. The actual CreateDatabase RPC is only
+// retried if the initial call did not reach the server. In other cases, the
+// client will query the backend for the long-running operation that was
+// created by the initial RPC and return that operation.
+func (c *DatabaseAdminClient) CreateDatabaseWithRetry(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
+ for {
+ db, createErr := c.CreateDatabase(ctx, req, opts...)
+ if createErr == nil {
+ return db, nil
+ }
+ // Failed, check whether we should retry.
+ delay, shouldRetry := retryer.Retry(createErr)
+ if !shouldRetry {
+ return nil, createErr
+ }
+ // Honors ctx cancellation/deadline while waiting out the backoff.
+ if err := gax.Sleep(ctx, delay); err != nil {
+ return nil, err
+ }
+ // Extract the name of the database.
+ dbName := extractDBName(req.CreateStatement)
+ // Query the backend for any corresponding long-running operation to
+ // determine whether we should retry the RPC or not.
+ iter := c.ListDatabaseOperations(ctx, &databasepb.ListDatabaseOperationsRequest{
+ Parent: req.Parent,
+ Filter: fmt.Sprintf("(metadata.@type:type.googleapis.com/google.spanner.admin.database.v1.CreateDatabaseMetadata) AND (name:%s/databases/%s/operations/)", req.Parent, dbName),
+ }, opts...)
+ var mostRecentOp *longrunningpb.Operation
+ for {
+ op, err := iter.Next()
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ // A running operation is the most recent and should be returned.
+ if !op.Done {
+ return c.CreateDatabaseOperation(op.Name), nil
+ }
+ // Remember a finished operation that did not fail; it may be
+ // returned below if the database turns out to exist.
+ if op.GetError() == nil {
+ mostRecentOp = op
+ }
+ }
+ // No successful operation found at all: the initial RPC presumably
+ // never reached the server, so retry CreateDatabase itself.
+ if mostRecentOp == nil {
+ continue
+ }
+ // Only finished operations found. Check whether the database exists.
+ _, getErr := c.GetDatabase(ctx, &databasepb.GetDatabaseRequest{
+ Name: fmt.Sprintf("%s/databases/%s", req.Parent, dbName),
+ })
+ if getErr == nil {
+ // Database found, return one of the long-running operations that
+ // has finished, which again should return the database.
+ return c.CreateDatabaseOperation(mostRecentOp.Name), nil
+ }
+ // Database gone again (e.g. dropped in the meantime): retry creation.
+ if status.Code(getErr) == codes.NotFound {
+ continue
+ }
+ // Error getting the database that was not NotFound.
+ return nil, getErr
+ }
+}
+
+// dbNameRegEx captures the database identifier (possibly with trailing
+// whitespace and/or backquotes) from a CREATE DATABASE statement.
+// NOTE(review): the match is case-sensitive ("CREATE DATABASE" only);
+// presumably lowercase statements are rejected by the backend before this
+// helper matters — confirm against upstream.
+var dbNameRegEx = regexp.MustCompile("\\s*CREATE\\s+DATABASE\\s+(.+)\\s*")
+
+// extractDBName extracts the database name from a valid CREATE DATABASE <db>
+// statement. We don't have to worry about invalid create statements, as those
+// should already have been handled by the backend and should return a non-
+// retryable error.
+//
+// Returns the empty string when the statement does not match the pattern.
+func extractDBName(createStatement string) string {
+ if dbNameRegEx.MatchString(createStatement) {
+ // The capture group is greedy, so strip trailing whitespace first.
+ namePossiblyWithQuotes := strings.TrimRightFunc(dbNameRegEx.FindStringSubmatch(createStatement)[1], unicode.IsSpace)
+ if len(namePossiblyWithQuotes) > 0 && namePossiblyWithQuotes[0] == '`' {
+ // Triple-backquoted name (```name```): strip three characters
+ // from each side.
+ if len(namePossiblyWithQuotes) > 5 && namePossiblyWithQuotes[1] == '`' && namePossiblyWithQuotes[2] == '`' {
+ return string(namePossiblyWithQuotes[3 : len(namePossiblyWithQuotes)-3])
+ }
+ // Single-backquoted name (`name`): strip one character from each
+ // side.
+ return string(namePossiblyWithQuotes[1 : len(namePossiblyWithQuotes)-1])
+ }
+ // Unquoted name: return as-is.
+ return string(namePossiblyWithQuotes)
+ }
+ return ""
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
new file mode 100644
index 000000000..1af1d1ed5
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
@@ -0,0 +1,4194 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package database
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+ "time"
+
+ iampb "cloud.google.com/go/iam/apiv1/iampb"
+ "cloud.google.com/go/longrunning"
+ lroauto "cloud.google.com/go/longrunning/autogen"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ databasepb "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ httptransport "google.golang.org/api/transport/http"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+// newDatabaseAdminClientHook, if set, is invoked when a new client is
+// created; clientHook is declared elsewhere in this package.
+var newDatabaseAdminClientHook clientHook
+
+// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient.
+// One []gax.CallOption slice per RPC; populated by the default*CallOptions
+// constructors below and overridable by users after client creation.
+type DatabaseAdminCallOptions struct {
+ ListDatabases []gax.CallOption
+ CreateDatabase []gax.CallOption
+ GetDatabase []gax.CallOption
+ UpdateDatabase []gax.CallOption
+ UpdateDatabaseDdl []gax.CallOption
+ DropDatabase []gax.CallOption
+ GetDatabaseDdl []gax.CallOption
+ SetIamPolicy []gax.CallOption
+ GetIamPolicy []gax.CallOption
+ TestIamPermissions []gax.CallOption
+ CreateBackup []gax.CallOption
+ CopyBackup []gax.CallOption
+ GetBackup []gax.CallOption
+ UpdateBackup []gax.CallOption
+ DeleteBackup []gax.CallOption
+ ListBackups []gax.CallOption
+ RestoreDatabase []gax.CallOption
+ ListDatabaseOperations []gax.CallOption
+ ListBackupOperations []gax.CallOption
+ ListDatabaseRoles []gax.CallOption
+ CreateBackupSchedule []gax.CallOption
+ GetBackupSchedule []gax.CallOption
+ UpdateBackupSchedule []gax.CallOption
+ DeleteBackupSchedule []gax.CallOption
+ ListBackupSchedules []gax.CallOption
+ CancelOperation []gax.CallOption
+ DeleteOperation []gax.CallOption
+ GetOperation []gax.CallOption
+ ListOperations []gax.CallOption
+}
+
+// defaultDatabaseAdminGRPCClientOptions returns the default gRPC dial options
+// for the Spanner admin endpoint: endpoints (regular, universe-domain
+// template, mTLS), audience, OAuth scopes, self-signed JWT support, and an
+// unlimited (math.MaxInt32) receive message size.
+func defaultDatabaseAdminGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("spanner.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("spanner.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("spanner.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+// defaultDatabaseAdminCallOptions returns the per-RPC call options used by
+// the gRPC transport. Read-style RPCs retry on Unavailable/DeadlineExceeded
+// with exponential backoff (1s initial, 32s max, 1.3x multiplier); RPCs that
+// start long-running mutations (CreateDatabase, CreateBackup, CopyBackup,
+// RestoreDatabase) and IAM writes get a timeout but no retry.
+func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions {
+ return &DatabaseAdminCallOptions{
+ ListDatabases: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ GetDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateDatabaseDdl: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ DropDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetDatabaseDdl: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ SetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ TestIamPermissions: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ CreateBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ CopyBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ GetBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ DeleteBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListBackups: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ RestoreDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ ListDatabaseOperations: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListBackupOperations: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListDatabaseRoles: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ DeleteBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListBackupSchedules: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ // Mixin Operations RPCs use library defaults (no timeout, no retry).
+ CancelOperation: []gax.CallOption{},
+ DeleteOperation: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
+ ListOperations: []gax.CallOption{},
+ }
+}
+
+// defaultDatabaseAdminRESTCallOptions mirrors defaultDatabaseAdminCallOptions
+// for the REST transport: the same timeouts and backoff, but retryable
+// conditions are expressed as HTTP status codes (503 Service Unavailable,
+// 504 Gateway Timeout) instead of gRPC codes.
+func defaultDatabaseAdminRESTCallOptions() *DatabaseAdminCallOptions {
+ return &DatabaseAdminCallOptions{
+ ListDatabases: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ CreateDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ GetDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ UpdateDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ UpdateDatabaseDdl: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ DropDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ GetDatabaseDdl: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ SetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ TestIamPermissions: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ CreateBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ CopyBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ GetBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ UpdateBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ DeleteBackup: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ListBackups: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ RestoreDatabase: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ ListDatabaseOperations: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ListBackupOperations: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ListDatabaseRoles: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ CreateBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ GetBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ UpdateBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ DeleteBackupSchedule: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ListBackupSchedules: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ // Mixin Operations RPCs use library defaults (no timeout, no retry).
+ CancelOperation: []gax.CallOption{},
+ DeleteOperation: []gax.CallOption{},
+ GetOperation: []gax.CallOption{},
+ ListOperations: []gax.CallOption{},
+ }
+}
+
+// internalDatabaseAdminClient is an interface that defines the methods available from Cloud Spanner API.
+// It abstracts over the transport (gRPC or REST); DatabaseAdminClient
+// delegates every public method to an implementation of this interface.
+type internalDatabaseAdminClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListDatabases(context.Context, *databasepb.ListDatabasesRequest, ...gax.CallOption) *DatabaseIterator
+ CreateDatabase(context.Context, *databasepb.CreateDatabaseRequest, ...gax.CallOption) (*CreateDatabaseOperation, error)
+ CreateDatabaseOperation(name string) *CreateDatabaseOperation
+ GetDatabase(context.Context, *databasepb.GetDatabaseRequest, ...gax.CallOption) (*databasepb.Database, error)
+ UpdateDatabase(context.Context, *databasepb.UpdateDatabaseRequest, ...gax.CallOption) (*UpdateDatabaseOperation, error)
+ UpdateDatabaseOperation(name string) *UpdateDatabaseOperation
+ UpdateDatabaseDdl(context.Context, *databasepb.UpdateDatabaseDdlRequest, ...gax.CallOption) (*UpdateDatabaseDdlOperation, error)
+ UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation
+ DropDatabase(context.Context, *databasepb.DropDatabaseRequest, ...gax.CallOption) error
+ GetDatabaseDdl(context.Context, *databasepb.GetDatabaseDdlRequest, ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error)
+ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ CreateBackup(context.Context, *databasepb.CreateBackupRequest, ...gax.CallOption) (*CreateBackupOperation, error)
+ CreateBackupOperation(name string) *CreateBackupOperation
+ CopyBackup(context.Context, *databasepb.CopyBackupRequest, ...gax.CallOption) (*CopyBackupOperation, error)
+ CopyBackupOperation(name string) *CopyBackupOperation
+ GetBackup(context.Context, *databasepb.GetBackupRequest, ...gax.CallOption) (*databasepb.Backup, error)
+ UpdateBackup(context.Context, *databasepb.UpdateBackupRequest, ...gax.CallOption) (*databasepb.Backup, error)
+ DeleteBackup(context.Context, *databasepb.DeleteBackupRequest, ...gax.CallOption) error
+ ListBackups(context.Context, *databasepb.ListBackupsRequest, ...gax.CallOption) *BackupIterator
+ RestoreDatabase(context.Context, *databasepb.RestoreDatabaseRequest, ...gax.CallOption) (*RestoreDatabaseOperation, error)
+ RestoreDatabaseOperation(name string) *RestoreDatabaseOperation
+ ListDatabaseOperations(context.Context, *databasepb.ListDatabaseOperationsRequest, ...gax.CallOption) *OperationIterator
+ ListBackupOperations(context.Context, *databasepb.ListBackupOperationsRequest, ...gax.CallOption) *OperationIterator
+ ListDatabaseRoles(context.Context, *databasepb.ListDatabaseRolesRequest, ...gax.CallOption) *DatabaseRoleIterator
+ CreateBackupSchedule(context.Context, *databasepb.CreateBackupScheduleRequest, ...gax.CallOption) (*databasepb.BackupSchedule, error)
+ GetBackupSchedule(context.Context, *databasepb.GetBackupScheduleRequest, ...gax.CallOption) (*databasepb.BackupSchedule, error)
+ UpdateBackupSchedule(context.Context, *databasepb.UpdateBackupScheduleRequest, ...gax.CallOption) (*databasepb.BackupSchedule, error)
+ DeleteBackupSchedule(context.Context, *databasepb.DeleteBackupScheduleRequest, ...gax.CallOption) error
+ ListBackupSchedules(context.Context, *databasepb.ListBackupSchedulesRequest, ...gax.CallOption) *BackupScheduleIterator
+ CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error
+ DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error
+ GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
+ ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator
+}
+
+// DatabaseAdminClient is a client for interacting with Cloud Spanner API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// # Cloud Spanner Database Admin API
+//
+// The Cloud Spanner Database Admin API can be used to:
+//
+// create, drop, and list databases
+//
+// update the schema of pre-existing databases
+//
+// create, delete, copy and list backups for a database
+//
+// restore a database from an existing backup
+type DatabaseAdminClient struct {
+ // The internal transport-dependent client.
+ internalClient internalDatabaseAdminClient
+
+ // The call options for this service.
+ CallOptions *DatabaseAdminCallOptions
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient *lroauto.OperationsClient
+}
+
+// Wrapper methods routed to the internal client. Each public method simply
+// delegates to the transport-specific internalDatabaseAdminClient.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *DatabaseAdminClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *DatabaseAdminClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *DatabaseAdminClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListDatabases lists Cloud Spanner databases.
+func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator {
+ return c.internalClient.ListDatabases(ctx, req, opts...)
+}
+
+// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving.
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format <database_name>/operations/<operation_id> and
+// can be used to track preparation of the database. The
+// metadata field type is
+// CreateDatabaseMetadata.
+// The response field type is
+// Database, if successful.
+func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
+ return c.internalClient.CreateDatabase(ctx, req, opts...)
+}
+
+// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name.
+// The name must be that of a previously created CreateDatabaseOperation, possibly from a different process.
+func (c *DatabaseAdminClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation {
+ return c.internalClient.CreateDatabaseOperation(name)
+}
+
+// GetDatabase gets the state of a Cloud Spanner database.
+func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
+ return c.internalClient.GetDatabase(ctx, req, opts...)
+}
+
+// UpdateDatabase updates a Cloud Spanner database. The returned
+// [long-running operation][google.longrunning.Operation] can be used to track
+// the progress of updating the database. If the named database does not
+// exist, returns NOT_FOUND.
+//
+// While the operation is pending:
+//
+// The database’s
+// reconciling
+// field is set to true.
+//
+// Cancelling the operation is best-effort. If the cancellation succeeds,
+// the operation metadata’s
+// cancel_time
+// is set, the updates are reverted, and the operation terminates with a
+// CANCELLED status.
+//
+// New UpdateDatabase requests will return a FAILED_PRECONDITION error
+// until the pending operation is done (returns successfully or with
+// error).
+//
+// Reading the database via the API continues to give the pre-request
+// values.
+//
+// Upon completion of the returned operation:
+//
+// The new values are in effect and readable via the API.
+//
+// The database’s
+// reconciling
+// field becomes false.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>
+// and can be used to track the database modification. The
+// metadata field type is
+// UpdateDatabaseMetadata.
+// The response field type is
+// Database, if successful.
+func (c *DatabaseAdminClient) UpdateDatabase(ctx context.Context, req *databasepb.UpdateDatabaseRequest, opts ...gax.CallOption) (*UpdateDatabaseOperation, error) {
+ return c.internalClient.UpdateDatabase(ctx, req, opts...)
+}
+
+// UpdateDatabaseOperation returns a new UpdateDatabaseOperation from a given name.
+// The name must be that of a previously created UpdateDatabaseOperation, possibly from a different process.
+func (c *DatabaseAdminClient) UpdateDatabaseOperation(name string) *UpdateDatabaseOperation {
+ return c.internalClient.UpdateDatabaseOperation(name)
+}
+
+// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by
+// creating/altering/dropping tables, columns, indexes, etc. The returned
+// [long-running operation][google.longrunning.Operation] will have a name of
+// the format <database_name>/operations/<operation_id> and can be used to
+// track execution of the schema change(s). The
+// metadata field type is
+// UpdateDatabaseDdlMetadata.
+// The operation has no response.
+func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
+ return c.internalClient.UpdateDatabaseDdl(ctx, req, opts...)
+}
+
+// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name.
+// The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process.
+func (c *DatabaseAdminClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation {
+ return c.internalClient.UpdateDatabaseDdlOperation(name)
+}
+
+// DropDatabase drops (aka deletes) a Cloud Spanner database.
+// Completed backups for the database will be retained according to their
+// expire_time.
+// Note: Cloud Spanner might continue to accept requests for a few seconds
+// after the database has been deleted.
+func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DropDatabase(ctx, req, opts...)
+}
+
+// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted
+// DDL statements. This method does not show pending schema updates, those may
+// be queried using the Operations API.
+func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
+ return c.internalClient.GetDatabaseDdl(ctx, req, opts...)
+}
+
+// SetIamPolicy sets the access control policy on a database or backup resource.
+// Replaces any existing policy.
+//
+// Authorization requires spanner.databases.setIamPolicy
+// permission on resource.
+// For backups, authorization requires spanner.backups.setIamPolicy
+// permission on resource.
+func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.SetIamPolicy(ctx, req, opts...)
+}
+
+// GetIamPolicy gets the access control policy for a database or backup resource.
+// Returns an empty policy if a database or backup exists but does not have a
+// policy set.
+//
+// Authorization requires spanner.databases.getIamPolicy permission on
+// resource.
+// For backups, authorization requires spanner.backups.getIamPolicy
+// permission on resource.
+func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.GetIamPolicy(ctx, req, opts...)
+}
+
+// TestIamPermissions returns permissions that the caller has on the specified database or backup
+// resource.
+//
+// Attempting this RPC on a non-existent Cloud Spanner database will
+// result in a NOT_FOUND error if the user has
+// spanner.databases.list permission on the containing Cloud
+// Spanner instance. Otherwise returns an empty set of permissions.
+// Calling this method on a backup that does not exist will
+// result in a NOT_FOUND error if the user has
+// spanner.backups.list permission on the containing instance.
+func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ return c.internalClient.TestIamPermissions(ctx, req, opts...)
+}
+
+// CreateBackup starts creating a new Cloud Spanner Backup.
+// The returned backup [long-running operation][google.longrunning.Operation]
+// will have a name of the format
+// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
+// and can be used to track creation of the backup. The
+// metadata field type is
+// CreateBackupMetadata.
+// The response field type is
+// Backup, if successful.
+// Cancelling the returned operation will stop the creation and delete the
+// backup. There can be only one pending backup creation per database. Backup
+// creation of different databases can run concurrently.
+func (c *DatabaseAdminClient) CreateBackup(ctx context.Context, req *databasepb.CreateBackupRequest, opts ...gax.CallOption) (*CreateBackupOperation, error) {
+ return c.internalClient.CreateBackup(ctx, req, opts...)
+}
+
+// CreateBackupOperation returns a new CreateBackupOperation from a given name.
+// The name must be that of a previously created CreateBackupOperation, possibly from a different process.
+func (c *DatabaseAdminClient) CreateBackupOperation(name string) *CreateBackupOperation {
+ return c.internalClient.CreateBackupOperation(name)
+}
+
+// CopyBackup starts copying a Cloud Spanner Backup.
+// The returned backup [long-running operation][google.longrunning.Operation]
+// will have a name of the format
+// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
+// and can be used to track copying of the backup. The operation is associated
+// with the destination backup.
+// The metadata field type is
+// CopyBackupMetadata.
+// The response field type is
+// Backup, if successful.
+// Cancelling the returned operation will stop the copying and delete the
+// destination backup. Concurrent CopyBackup requests can run on the same
+// source backup.
+func (c *DatabaseAdminClient) CopyBackup(ctx context.Context, req *databasepb.CopyBackupRequest, opts ...gax.CallOption) (*CopyBackupOperation, error) {
+ return c.internalClient.CopyBackup(ctx, req, opts...)
+}
+
+// CopyBackupOperation returns a new CopyBackupOperation from a given name.
+// The name must be that of a previously created CopyBackupOperation, possibly from a different process.
+func (c *DatabaseAdminClient) CopyBackupOperation(name string) *CopyBackupOperation {
+ return c.internalClient.CopyBackupOperation(name)
+}
+
+// GetBackup gets metadata on a pending or completed
+// Backup.
+func (c *DatabaseAdminClient) GetBackup(ctx context.Context, req *databasepb.GetBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ return c.internalClient.GetBackup(ctx, req, opts...)
+}
+
+// UpdateBackup updates a pending or completed
+// Backup.
+func (c *DatabaseAdminClient) UpdateBackup(ctx context.Context, req *databasepb.UpdateBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ return c.internalClient.UpdateBackup(ctx, req, opts...)
+}
+
+// DeleteBackup deletes a pending or completed
+// Backup.
+func (c *DatabaseAdminClient) DeleteBackup(ctx context.Context, req *databasepb.DeleteBackupRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteBackup(ctx, req, opts...)
+}
+
+// ListBackups lists completed and pending backups.
+// Backups returned are ordered by create_time in descending order,
+// starting from the most recent create_time.
+func (c *DatabaseAdminClient) ListBackups(ctx context.Context, req *databasepb.ListBackupsRequest, opts ...gax.CallOption) *BackupIterator {
+ return c.internalClient.ListBackups(ctx, req, opts...)
+}
+
+// RestoreDatabase create a new database by restoring from a completed backup. The new
+// database must be in the same project and in an instance with the same
+// instance configuration as the instance containing
+// the backup. The returned database [long-running
+// operation][google.longrunning.Operation] has a name of the format
+// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>,
+// and can be used to track the progress of the operation, and to cancel it.
+// The metadata field type is
+// RestoreDatabaseMetadata.
+// The response type
+// is Database, if
+// successful. Cancelling the returned operation will stop the restore and
+// delete the database.
+// There can be only one database being restored into an instance at a time.
+// Once the restore operation completes, a new restore operation can be
+// initiated, without waiting for the optimize operation associated with the
+// first restore to complete.
+func (c *DatabaseAdminClient) RestoreDatabase(ctx context.Context, req *databasepb.RestoreDatabaseRequest, opts ...gax.CallOption) (*RestoreDatabaseOperation, error) {
+ return c.internalClient.RestoreDatabase(ctx, req, opts...)
+}
+
+// RestoreDatabaseOperation returns a new RestoreDatabaseOperation from a given name.
+// The name must be that of a previously created RestoreDatabaseOperation, possibly from a different process.
+func (c *DatabaseAdminClient) RestoreDatabaseOperation(name string) *RestoreDatabaseOperation {
+ return c.internalClient.RestoreDatabaseOperation(name)
+}
+
+// ListDatabaseOperations lists database [longrunning-operations][google.longrunning.Operation].
+// A database operation has a name of the form
+// projects/<project>/instances/<instance>/databases/<database>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations.
+func (c *DatabaseAdminClient) ListDatabaseOperations(ctx context.Context, req *databasepb.ListDatabaseOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ return c.internalClient.ListDatabaseOperations(ctx, req, opts...)
+}
+
+// ListBackupOperations lists the backup [long-running operations][google.longrunning.Operation] in
+// the given instance. A backup operation has a name of the form
+// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations. Operations returned are ordered by
+// operation.metadata.value.progress.start_time in descending order starting
+// from the most recently started operation.
+func (c *DatabaseAdminClient) ListBackupOperations(ctx context.Context, req *databasepb.ListBackupOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ return c.internalClient.ListBackupOperations(ctx, req, opts...)
+}
+
+// ListDatabaseRoles lists Cloud Spanner database roles.
+func (c *DatabaseAdminClient) ListDatabaseRoles(ctx context.Context, req *databasepb.ListDatabaseRolesRequest, opts ...gax.CallOption) *DatabaseRoleIterator {
+ return c.internalClient.ListDatabaseRoles(ctx, req, opts...)
+}
+
+// CreateBackupSchedule creates a new backup schedule.
+func (c *DatabaseAdminClient) CreateBackupSchedule(ctx context.Context, req *databasepb.CreateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
+ return c.internalClient.CreateBackupSchedule(ctx, req, opts...)
+}
+
+// GetBackupSchedule gets backup schedule for the input schedule name.
+func (c *DatabaseAdminClient) GetBackupSchedule(ctx context.Context, req *databasepb.GetBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
+ return c.internalClient.GetBackupSchedule(ctx, req, opts...)
+}
+
+// UpdateBackupSchedule updates a backup schedule.
+func (c *DatabaseAdminClient) UpdateBackupSchedule(ctx context.Context, req *databasepb.UpdateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
+ return c.internalClient.UpdateBackupSchedule(ctx, req, opts...)
+}
+
+// DeleteBackupSchedule deletes a backup schedule.
+func (c *DatabaseAdminClient) DeleteBackupSchedule(ctx context.Context, req *databasepb.DeleteBackupScheduleRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteBackupSchedule(ctx, req, opts...)
+}
+
+// ListBackupSchedules lists all the backup schedules for the database.
+func (c *DatabaseAdminClient) ListBackupSchedules(ctx context.Context, req *databasepb.ListBackupSchedulesRequest, opts ...gax.CallOption) *BackupScheduleIterator {
+ return c.internalClient.ListBackupSchedules(ctx, req, opts...)
+}
+
+// CancelOperation is a utility method from google.longrunning.Operations.
+func (c *DatabaseAdminClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
+ return c.internalClient.CancelOperation(ctx, req, opts...)
+}
+
+// DeleteOperation is a utility method from google.longrunning.Operations.
+func (c *DatabaseAdminClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteOperation(ctx, req, opts...)
+}
+
+// GetOperation is a utility method from google.longrunning.Operations.
+func (c *DatabaseAdminClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+ return c.internalClient.GetOperation(ctx, req, opts...)
+}
+
+// ListOperations is a utility method from google.longrunning.Operations.
+func (c *DatabaseAdminClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ return c.internalClient.ListOperations(ctx, req, opts...)
+}
+
+// databaseAdminGRPCClient is a client for interacting with Cloud Spanner API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type databaseAdminGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing DatabaseAdminClient
+ CallOptions **DatabaseAdminCallOptions
+
+ // The gRPC API client.
+ databaseAdminClient databasepb.DatabaseAdminClient
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient **lroauto.OperationsClient
+
+ operationsClient longrunningpb.OperationsClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+}
+
+// NewDatabaseAdminClient creates a new database admin client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// # Cloud Spanner Database Admin API
+//
+// The Cloud Spanner Database Admin API can be used to:
+//
+// create, drop, and list databases
+//
+// update the schema of pre-existing databases
+//
+// create, delete, copy and list backups for a database
+//
+// restore a database from an existing backup
+func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) {
+ clientOpts := defaultDatabaseAdminGRPCClientOptions()
+ if newDatabaseAdminClientHook != nil {
+ hookOpts, err := newDatabaseAdminClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := DatabaseAdminClient{CallOptions: defaultDatabaseAdminCallOptions()}
+
+ c := &databaseAdminGRPCClient{
+ connPool: connPool,
+ databaseAdminClient: databasepb.NewDatabaseAdminClient(connPool),
+ CallOptions: &client.CallOptions,
+ operationsClient: longrunningpb.NewOperationsClient(connPool),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
+ if err != nil {
+ // This error "should not happen", since we are just reusing old connection pool
+ // and never actually need to dial.
+ // If this does happen, we could leak connp. However, we cannot close conn:
+ // If the user invoked the constructor with option.WithGRPCConn,
+ // we would close a connection that's still in use.
+ // TODO: investigate error conditions.
+ return nil, err
+ }
+ c.LROClient = &client.LROClient
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *databaseAdminGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *databaseAdminGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *databaseAdminGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type databaseAdminRESTClient struct {
+ // The http endpoint to connect to.
+ endpoint string
+
+ // The http client.
+ httpClient *http.Client
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient **lroauto.OperationsClient
+
+ // The x-goog-* headers to be sent with each request.
+ xGoogHeaders []string
+
+ // Points back to the CallOptions field of the containing DatabaseAdminClient
+ CallOptions **DatabaseAdminCallOptions
+}
+
+// NewDatabaseAdminRESTClient creates a new database admin rest client.
+//
+// # Cloud Spanner Database Admin API
+//
+// The Cloud Spanner Database Admin API can be used to:
+//
+// create, drop, and list databases
+//
+// update the schema of pre-existing databases
+//
+// create, delete, copy and list backups for a database
+//
+// restore a database from an existing backup
+func NewDatabaseAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) {
+ clientOpts := append(defaultDatabaseAdminRESTClientOptions(), opts...)
+ httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ callOpts := defaultDatabaseAdminRESTCallOptions()
+ c := &databaseAdminRESTClient{
+ endpoint: endpoint,
+ httpClient: httpClient,
+ CallOptions: &callOpts,
+ }
+ c.setGoogleClientInfo()
+
+ lroOpts := []option.ClientOption{
+ option.WithHTTPClient(httpClient),
+ option.WithEndpoint(endpoint),
+ }
+ opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...)
+ if err != nil {
+ return nil, err
+ }
+ c.LROClient = &opClient
+
+ return &DatabaseAdminClient{internalClient: c, CallOptions: callOpts}, nil
+}
+
+func defaultDatabaseAdminRESTClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("https://spanner.googleapis.com"),
+ internaloption.WithDefaultEndpointTemplate("https://spanner.UNIVERSE_DOMAIN"),
+ internaloption.WithDefaultMTLSEndpoint("https://spanner.mtls.googleapis.com"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ }
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *databaseAdminRESTClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *databaseAdminRESTClient) Close() error {
+ // Replace httpClient with nil to force cleanup.
+ c.httpClient = nil
+ return nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: This method always returns nil.
+func (c *databaseAdminRESTClient) Connection() *grpc.ClientConn {
+ return nil
+}
+func (c *databaseAdminGRPCClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListDatabases[0:len((*c.CallOptions).ListDatabases):len((*c.CallOptions).ListDatabases)], opts...)
+ it := &DatabaseIterator{}
+ req = proto.Clone(req).(*databasepb.ListDatabasesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) {
+ resp := &databasepb.ListDatabasesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.ListDatabases(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetDatabases(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *databaseAdminGRPCClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateDatabase[0:len((*c.CallOptions).CreateDatabase):len((*c.CallOptions).CreateDatabase)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.CreateDatabase(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &CreateDatabaseOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+func (c *databaseAdminGRPCClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetDatabase[0:len((*c.CallOptions).GetDatabase):len((*c.CallOptions).GetDatabase)], opts...)
+ var resp *databasepb.Database
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.GetDatabase(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *databaseAdminGRPCClient) UpdateDatabase(ctx context.Context, req *databasepb.UpdateDatabaseRequest, opts ...gax.CallOption) (*UpdateDatabaseOperation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database.name", url.QueryEscape(req.GetDatabase().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateDatabase[0:len((*c.CallOptions).UpdateDatabase):len((*c.CallOptions).UpdateDatabase)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.UpdateDatabase(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &UpdateDatabaseOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+func (c *databaseAdminGRPCClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateDatabaseDdl[0:len((*c.CallOptions).UpdateDatabaseDdl):len((*c.CallOptions).UpdateDatabaseDdl)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &UpdateDatabaseDdlOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+func (c *databaseAdminGRPCClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DropDatabase[0:len((*c.CallOptions).DropDatabase):len((*c.CallOptions).DropDatabase)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = c.databaseAdminClient.DropDatabase(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *databaseAdminGRPCClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetDatabaseDdl[0:len((*c.CallOptions).GetDatabaseDdl):len((*c.CallOptions).GetDatabaseDdl)], opts...)
+ var resp *databasepb.GetDatabaseDdlResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *databaseAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *databaseAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *databaseAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ var resp *iampb.TestIamPermissionsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *databaseAdminGRPCClient) CreateBackup(ctx context.Context, req *databasepb.CreateBackupRequest, opts ...gax.CallOption) (*CreateBackupOperation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateBackup[0:len((*c.CallOptions).CreateBackup):len((*c.CallOptions).CreateBackup)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.CreateBackup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &CreateBackupOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+func (c *databaseAdminGRPCClient) CopyBackup(ctx context.Context, req *databasepb.CopyBackupRequest, opts ...gax.CallOption) (*CopyBackupOperation, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CopyBackup[0:len((*c.CallOptions).CopyBackup):len((*c.CallOptions).CopyBackup)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.CopyBackup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &CopyBackupOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+func (c *databaseAdminGRPCClient) GetBackup(ctx context.Context, req *databasepb.GetBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetBackup[0:len((*c.CallOptions).GetBackup):len((*c.CallOptions).GetBackup)], opts...)
+ var resp *databasepb.Backup
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = c.databaseAdminClient.GetBackup(ctx, req, settings.GRPC...)
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
// UpdateBackup updates a backup, routed by the nested "backup.name" field.
func (c *databaseAdminGRPCClient) UpdateBackup(ctx context.Context, req *databasepb.UpdateBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup.name", url.QueryEscape(req.GetBackup().GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).UpdateBackup[0:len((*c.CallOptions).UpdateBackup):len((*c.CallOptions).UpdateBackup)], opts...)
	var resp *databasepb.Backup
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.databaseAdminClient.UpdateBackup(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
+
// DeleteBackup deletes a backup, routed by the request's "name".
func (c *databaseAdminGRPCClient) DeleteBackup(ctx context.Context, req *databasepb.DeleteBackupRequest, opts ...gax.CallOption) error {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).DeleteBackup[0:len((*c.CallOptions).DeleteBackup):len((*c.CallOptions).DeleteBackup)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// The response payload is discarded; only the error is surfaced.
		_, err = c.databaseAdminClient.DeleteBackup(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
+
// ListBackups returns a paginated iterator over backups under the request's
// "parent"; each page fetch reuses the routing header and merged call options.
func (c *databaseAdminGRPCClient) ListBackups(ctx context.Context, req *databasepb.ListBackupsRequest, opts ...gax.CallOption) *BackupIterator {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).ListBackups[0:len((*c.CallOptions).ListBackups):len((*c.CallOptions).ListBackups)], opts...)
	it := &BackupIterator{}
	// Clone so the page-token/page-size mutations below never alter the
	// caller's request.
	req = proto.Clone(req).(*databasepb.ListBackupsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Backup, string, error) {
		resp := &databasepb.ListBackupsResponse{}
		if pageToken != "" {
			req.PageToken = pageToken
		}
		if pageSize > math.MaxInt32 {
			// req.PageSize is int32; clamp to avoid overflow.
			req.PageSize = math.MaxInt32
		} else if pageSize != 0 {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.databaseAdminClient.ListBackups(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}

		it.Response = resp
		return resp.GetBackups(), resp.GetNextPageToken(), nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.GetPageSize())
	it.pageInfo.Token = req.GetPageToken()

	return it
}
+
// RestoreDatabase starts the RestoreDatabase RPC and wraps the returned
// longrunning Operation in a RestoreDatabaseOperation handle.
func (c *databaseAdminGRPCClient) RestoreDatabase(ctx context.Context, req *databasepb.RestoreDatabaseRequest, opts ...gax.CallOption) (*RestoreDatabaseOperation, error) {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).RestoreDatabase[0:len((*c.CallOptions).RestoreDatabase):len((*c.CallOptions).RestoreDatabase)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.databaseAdminClient.RestoreDatabase(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &RestoreDatabaseOperation{
		lro: longrunning.InternalNewOperation(*c.LROClient, resp),
	}, nil
}
+
// ListDatabaseOperations returns a paginated iterator over database
// long-running operations under the request's "parent".
func (c *databaseAdminGRPCClient) ListDatabaseOperations(ctx context.Context, req *databasepb.ListDatabaseOperationsRequest, opts ...gax.CallOption) *OperationIterator {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).ListDatabaseOperations[0:len((*c.CallOptions).ListDatabaseOperations):len((*c.CallOptions).ListDatabaseOperations)], opts...)
	it := &OperationIterator{}
	// Clone to keep the caller's request immutable across page fetches.
	req = proto.Clone(req).(*databasepb.ListDatabaseOperationsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
		resp := &databasepb.ListDatabaseOperationsResponse{}
		if pageToken != "" {
			req.PageToken = pageToken
		}
		if pageSize > math.MaxInt32 {
			// req.PageSize is int32; clamp to avoid overflow.
			req.PageSize = math.MaxInt32
		} else if pageSize != 0 {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.databaseAdminClient.ListDatabaseOperations(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}

		it.Response = resp
		return resp.GetOperations(), resp.GetNextPageToken(), nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.GetPageSize())
	it.pageInfo.Token = req.GetPageToken()

	return it
}
+
// ListBackupOperations returns a paginated iterator over backup long-running
// operations under the request's "parent".
func (c *databaseAdminGRPCClient) ListBackupOperations(ctx context.Context, req *databasepb.ListBackupOperationsRequest, opts ...gax.CallOption) *OperationIterator {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).ListBackupOperations[0:len((*c.CallOptions).ListBackupOperations):len((*c.CallOptions).ListBackupOperations)], opts...)
	it := &OperationIterator{}
	// Clone to keep the caller's request immutable across page fetches.
	req = proto.Clone(req).(*databasepb.ListBackupOperationsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
		resp := &databasepb.ListBackupOperationsResponse{}
		if pageToken != "" {
			req.PageToken = pageToken
		}
		if pageSize > math.MaxInt32 {
			// req.PageSize is int32; clamp to avoid overflow.
			req.PageSize = math.MaxInt32
		} else if pageSize != 0 {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.databaseAdminClient.ListBackupOperations(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}

		it.Response = resp
		return resp.GetOperations(), resp.GetNextPageToken(), nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.GetPageSize())
	it.pageInfo.Token = req.GetPageToken()

	return it
}
+
// ListDatabaseRoles returns a paginated iterator over database roles under
// the request's "parent".
func (c *databaseAdminGRPCClient) ListDatabaseRoles(ctx context.Context, req *databasepb.ListDatabaseRolesRequest, opts ...gax.CallOption) *DatabaseRoleIterator {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).ListDatabaseRoles[0:len((*c.CallOptions).ListDatabaseRoles):len((*c.CallOptions).ListDatabaseRoles)], opts...)
	it := &DatabaseRoleIterator{}
	// Clone to keep the caller's request immutable across page fetches.
	req = proto.Clone(req).(*databasepb.ListDatabaseRolesRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.DatabaseRole, string, error) {
		resp := &databasepb.ListDatabaseRolesResponse{}
		if pageToken != "" {
			req.PageToken = pageToken
		}
		if pageSize > math.MaxInt32 {
			// req.PageSize is int32; clamp to avoid overflow.
			req.PageSize = math.MaxInt32
		} else if pageSize != 0 {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.databaseAdminClient.ListDatabaseRoles(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}

		it.Response = resp
		return resp.GetDatabaseRoles(), resp.GetNextPageToken(), nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.GetPageSize())
	it.pageInfo.Token = req.GetPageToken()

	return it
}
+
// CreateBackupSchedule creates a backup schedule under the request's "parent".
func (c *databaseAdminGRPCClient) CreateBackupSchedule(ctx context.Context, req *databasepb.CreateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).CreateBackupSchedule[0:len((*c.CallOptions).CreateBackupSchedule):len((*c.CallOptions).CreateBackupSchedule)], opts...)
	var resp *databasepb.BackupSchedule
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.databaseAdminClient.CreateBackupSchedule(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
+
// GetBackupSchedule fetches a backup schedule, routed by the request's "name".
func (c *databaseAdminGRPCClient) GetBackupSchedule(ctx context.Context, req *databasepb.GetBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).GetBackupSchedule[0:len((*c.CallOptions).GetBackupSchedule):len((*c.CallOptions).GetBackupSchedule)], opts...)
	var resp *databasepb.BackupSchedule
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.databaseAdminClient.GetBackupSchedule(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
+
// UpdateBackupSchedule updates a backup schedule, routed by the nested
// "backup_schedule.name" field.
func (c *databaseAdminGRPCClient) UpdateBackupSchedule(ctx context.Context, req *databasepb.UpdateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup_schedule.name", url.QueryEscape(req.GetBackupSchedule().GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).UpdateBackupSchedule[0:len((*c.CallOptions).UpdateBackupSchedule):len((*c.CallOptions).UpdateBackupSchedule)], opts...)
	var resp *databasepb.BackupSchedule
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.databaseAdminClient.UpdateBackupSchedule(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
+
// DeleteBackupSchedule deletes a backup schedule, routed by the request's "name".
func (c *databaseAdminGRPCClient) DeleteBackupSchedule(ctx context.Context, req *databasepb.DeleteBackupScheduleRequest, opts ...gax.CallOption) error {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).DeleteBackupSchedule[0:len((*c.CallOptions).DeleteBackupSchedule):len((*c.CallOptions).DeleteBackupSchedule)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// The response payload is discarded; only the error is surfaced.
		_, err = c.databaseAdminClient.DeleteBackupSchedule(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
+
// ListBackupSchedules returns a paginated iterator over backup schedules
// under the request's "parent".
func (c *databaseAdminGRPCClient) ListBackupSchedules(ctx context.Context, req *databasepb.ListBackupSchedulesRequest, opts ...gax.CallOption) *BackupScheduleIterator {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).ListBackupSchedules[0:len((*c.CallOptions).ListBackupSchedules):len((*c.CallOptions).ListBackupSchedules)], opts...)
	it := &BackupScheduleIterator{}
	// Clone to keep the caller's request immutable across page fetches.
	req = proto.Clone(req).(*databasepb.ListBackupSchedulesRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.BackupSchedule, string, error) {
		resp := &databasepb.ListBackupSchedulesResponse{}
		if pageToken != "" {
			req.PageToken = pageToken
		}
		if pageSize > math.MaxInt32 {
			// req.PageSize is int32; clamp to avoid overflow.
			req.PageSize = math.MaxInt32
		} else if pageSize != 0 {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.databaseAdminClient.ListBackupSchedules(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}

		it.Response = resp
		return resp.GetBackupSchedules(), resp.GetNextPageToken(), nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.GetPageSize())
	it.pageInfo.Token = req.GetPageToken()

	return it
}
+
// CancelOperation forwards to the longrunning Operations service
// (c.operationsClient) to cancel the named operation.
func (c *databaseAdminGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// The response payload is discarded; only the error is surfaced.
		_, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
+
// DeleteOperation forwards to the longrunning Operations service
// (c.operationsClient) to delete the named operation.
func (c *databaseAdminGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		// The response payload is discarded; only the error is surfaced.
		_, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}
+
// GetOperation forwards to the longrunning Operations service
// (c.operationsClient) to fetch the named operation's current state.
func (c *databaseAdminGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
+
// ListOperations returns a paginated iterator over long-running operations,
// forwarded to the Operations service (c.operationsClient) and routed by "name".
func (c *databaseAdminGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
	opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...)
	it := &OperationIterator{}
	// Clone to keep the caller's request immutable across page fetches.
	req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
	it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
		resp := &longrunningpb.ListOperationsResponse{}
		if pageToken != "" {
			req.PageToken = pageToken
		}
		if pageSize > math.MaxInt32 {
			// req.PageSize is int32; clamp to avoid overflow.
			req.PageSize = math.MaxInt32
		} else if pageSize != 0 {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}

		it.Response = resp
		return resp.GetOperations(), resp.GetNextPageToken(), nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}

	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	it.pageInfo.MaxSize = int(req.GetPageSize())
	it.pageInfo.Token = req.GetPageToken()

	return it
}
+
+// ListDatabases lists Cloud Spanner databases.
+func (c *databaseAdminRESTClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator {
+ it := &DatabaseIterator{}
+ req = proto.Clone(req).(*databasepb.ListDatabasesRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) {
+ resp := &databasepb.ListDatabasesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/databases", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetDatabases(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving.
// The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format <database_name>/operations/<operation_id> and
// can be used to track preparation of the database. The
// metadata field type is
// CreateDatabaseMetadata.
// The response field type is
// Database, if successful.
func (c *databaseAdminRESTClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
	// The whole request message is serialized as the JSON request body.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v/databases", req.GetParent())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// pollPath carries the REST path of the returned operation — presumably
	// used when polling this LRO over REST; confirm against the Operation
	// wrapper's poll implementation.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &CreateDatabaseOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
// GetDatabase gets the state of a Cloud Spanner database.
func (c *databaseAdminRESTClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	// Merge per-method call options (copy-on-append) with caller opts.
	opts = append((*c.CallOptions).GetDatabase[0:len((*c.CallOptions).GetDatabase):len((*c.CallOptions).GetDatabase)], opts...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &databasepb.Database{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}
	return resp, nil
}
+
// UpdateDatabase updates a Cloud Spanner database. The returned
// [long-running operation][google.longrunning.Operation] can be used to track
// the progress of updating the database. If the named database does not
// exist, returns NOT_FOUND.
//
// While the operation is pending:
//
// The database’s
// reconciling
// field is set to true.
//
// Cancelling the operation is best-effort. If the cancellation succeeds,
// the operation metadata’s
// cancel_time
// is set, the updates are reverted, and the operation terminates with a
// CANCELLED status.
//
// New UpdateDatabase requests will return a FAILED_PRECONDITION error
// until the pending operation is done (returns successfully or with
// error).
//
// Reading the database via the API continues to give the pre-request
// values.
//
// Upon completion of the returned operation:
//
// The new values are in effect and readable via the API.
//
// The database’s
// reconciling
// field becomes false.
//
// The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format
// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>
// and can be used to track the database modification. The
// metadata field type is
// UpdateDatabaseMetadata.
// The response field type is
// Database, if successful.
func (c *databaseAdminRESTClient) UpdateDatabase(ctx context.Context, req *databasepb.UpdateDatabaseRequest, opts ...gax.CallOption) (*UpdateDatabaseOperation, error) {
	// Only the nested Database message is the PATCH body; update_mask travels
	// as a query parameter instead.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	body := req.GetDatabase()
	jsonReq, err := m.Marshal(body)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetDatabase().GetName())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")
	if req.GetUpdateMask() != nil {
		updateMask, err := protojson.Marshal(req.GetUpdateMask())
		if err != nil {
			return nil, err
		}
		// Slice off the first and last byte — presumably the surrounding JSON
		// string quotes produced by protojson for a FieldMask.
		params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
	}

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database.name", url.QueryEscape(req.GetDatabase().GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// pollPath carries the REST path of the returned operation — presumably
	// used when polling this LRO over REST.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &UpdateDatabaseOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by
// creating/altering/dropping tables, columns, indexes, etc. The returned
// [long-running operation][google.longrunning.Operation] will have a name of
// the format <database_name>/operations/<operation_id> and can be used to
// track execution of the schema change(s). The
// metadata field type is
// UpdateDatabaseDdlMetadata.
// The operation has no response.
func (c *databaseAdminRESTClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
	// The whole request message is serialized as the JSON request body.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v/ddl", req.GetDatabase())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// pollPath carries the REST path of the returned operation — presumably
	// used when polling this LRO over REST.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &UpdateDatabaseDdlOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
// DropDatabase drops (aka deletes) a Cloud Spanner database.
// Completed backups for the database will be retained according to their
// expire_time.
// Note: Cloud Spanner might continue to accept requests for a few seconds
// after the database has been deleted.
func (c *databaseAdminRESTClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetDatabase())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// NOTE(review): on success the body is closed without being drained,
		// which may prevent the transport from reusing this connection —
		// confirm whether that is acceptable here.
		// Returns nil if there is no error, otherwise wraps
		// the response code and body into a non-nil error
		return googleapi.CheckResponse(httpRsp)
	}, opts...)
}
+
+// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted
+// DDL statements. This method does not show pending schema updates, those may
+// be queried using the Operations API.
+func (c *databaseAdminRESTClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/ddl", req.GetDatabase())
+
+ params := url.Values{}
+ // Request JSON responses with enum values encoded as integers.
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // Copy the method's default call options; the full slice expression forces
+ // append to allocate, so the shared defaults are never mutated.
+ opts = append((*c.CallOptions).GetDatabaseDdl[0:len((*c.CallOptions).GetDatabaseDdl):len((*c.CallOptions).GetDatabaseDdl)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &databasepb.GetDatabaseDdlResponse{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ // Per-call settings may substitute a different URL path.
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// SetIamPolicy sets the access control policy on a database or backup resource.
+// Replaces any existing policy.
+//
+// Authorization requires spanner.databases.setIamPolicy
+// permission on resource.
+// For backups, authorization requires spanner.backups.setIamPolicy
+// permission on resource.
+func (c *databaseAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ // Serialize the whole request message as the JSON request body.
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // Copy the method's default call options without mutating the shared slice.
+ opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.Policy{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// GetIamPolicy gets the access control policy for a database or backup resource.
+// Returns an empty policy if a database or backup exists but does not have a
+// policy set.
+//
+// Authorization requires spanner.databases.getIamPolicy permission on
+// resource.
+// For backups, authorization requires spanner.backups.getIamPolicy
+// permission on resource.
+func (c *databaseAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ // Serialize the whole request message as the JSON request body
+ // (this RPC is mapped to POST despite being a read).
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // Copy the method's default call options without mutating the shared slice.
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.Policy{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// TestIamPermissions returns permissions that the caller has on the specified database or backup
+// resource.
+//
+// Attempting this RPC on a non-existent Cloud Spanner database will
+// result in a NOT_FOUND error if the user has
+// spanner.databases.list permission on the containing Cloud
+// Spanner instance. Otherwise returns an empty set of permissions.
+// Calling this method on a backup that does not exist will
+// result in a NOT_FOUND error if the user has
+// spanner.backups.list permission on the containing instance.
+func (c *databaseAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ // Serialize the whole request message as the JSON request body.
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // Copy the method's default call options without mutating the shared slice.
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &iampb.TestIamPermissionsResponse{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// CreateBackup starts creating a new Cloud Spanner Backup.
+// The returned backup [long-running operation][google.longrunning.Operation]
+// will have a name of the format
+// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
+// and can be used to track creation of the backup. The
+// metadata field type is
+// CreateBackupMetadata.
+// The response field type is
+// Backup, if successful.
+// Cancelling the returned operation will stop the creation and delete the
+// backup. There can be only one pending backup creation per database. Backup
+// creation of different databases can run concurrently.
+func (c *databaseAdminRESTClient) CreateBackup(ctx context.Context, req *databasepb.CreateBackupRequest, opts ...gax.CallOption) (*CreateBackupOperation, error) {
+ // Only the Backup sub-message goes in the request body; the remaining
+ // request fields are carried as query parameters below.
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ body := req.GetBackup()
+ jsonReq, err := m.Marshal(body)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/backups", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ params.Add("backupId", fmt.Sprintf("%v", req.GetBackupId()))
+ params.Add("encryptionConfig.encryptionType", fmt.Sprintf("%v", req.GetEncryptionConfig().GetEncryptionType()))
+ if req.GetEncryptionConfig().GetKmsKeyName() != "" {
+ params.Add("encryptionConfig.kmsKeyName", fmt.Sprintf("%v", req.GetEncryptionConfig().GetKmsKeyName()))
+ }
+ if items := req.GetEncryptionConfig().GetKmsKeyNames(); len(items) > 0 {
+ for _, item := range items {
+ params.Add("encryptionConfig.kmsKeyNames", fmt.Sprintf("%v", item))
+ }
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+
+ // Poll the returned long-running operation at its resource name under /v1.
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &CreateBackupOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ pollPath: override,
+ }, nil
+}
+
+// CopyBackup starts copying a Cloud Spanner Backup.
+// The returned backup [long-running operation][google.longrunning.Operation]
+// will have a name of the format
+// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
+// and can be used to track copying of the backup. The operation is associated
+// with the destination backup.
+// The metadata field type is
+// CopyBackupMetadata.
+// The response field type is
+// Backup, if successful.
+// Cancelling the returned operation will stop the copying and delete the
+// destination backup. Concurrent CopyBackup requests can run on the same
+// source backup.
+func (c *databaseAdminRESTClient) CopyBackup(ctx context.Context, req *databasepb.CopyBackupRequest, opts ...gax.CallOption) (*CopyBackupOperation, error) {
+ // Serialize the whole request message as the JSON request body.
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/backups:copy", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+
+ // Poll the returned long-running operation at its resource name under /v1.
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &CopyBackupOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ pollPath: override,
+ }, nil
+}
+
+// GetBackup gets metadata on a pending or completed
+// Backup.
+func (c *databaseAdminRESTClient) GetBackup(ctx context.Context, req *databasepb.GetBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ // Request JSON responses with enum values encoded as integers.
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // Copy the method's default call options without mutating the shared slice.
+ opts = append((*c.CallOptions).GetBackup[0:len((*c.CallOptions).GetBackup):len((*c.CallOptions).GetBackup)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &databasepb.Backup{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// UpdateBackup updates a pending or completed
+// Backup.
+func (c *databaseAdminRESTClient) UpdateBackup(ctx context.Context, req *databasepb.UpdateBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
+ // Only the Backup sub-message goes in the request body; the update mask
+ // travels as a query parameter below.
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ body := req.GetBackup()
+ jsonReq, err := m.Marshal(body)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetBackup().GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetUpdateMask() != nil {
+ updateMask, err := protojson.Marshal(req.GetUpdateMask())
+ if err != nil {
+ return nil, err
+ }
+ // protojson renders a FieldMask as a quoted JSON string; strip the
+ // surrounding quotes before using it as a query parameter value.
+ params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup.name", url.QueryEscape(req.GetBackup().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // Copy the method's default call options without mutating the shared slice.
+ opts = append((*c.CallOptions).UpdateBackup[0:len((*c.CallOptions).UpdateBackup):len((*c.CallOptions).UpdateBackup)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &databasepb.Backup{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+ return resp, nil
+}
+
+// DeleteBackup deletes a pending or completed
+// Backup.
+func (c *databaseAdminRESTClient) DeleteBackup(ctx context.Context, req *databasepb.DeleteBackupRequest, opts ...gax.CallOption) error {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ // Request JSON responses with enum values encoded as integers.
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ // Per-call settings may substitute a different URL path.
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ // Returns nil if there is no error, otherwise wraps
+ // the response code and body into a non-nil error
+ return googleapi.CheckResponse(httpRsp)
+ }, opts...)
+}
+
+// ListBackups lists completed and pending backups.
+// Backups returned are ordered by create_time in descending order,
+// starting from the most recent create_time.
+func (c *databaseAdminRESTClient) ListBackups(ctx context.Context, req *databasepb.ListBackupsRequest, opts ...gax.CallOption) *BackupIterator {
+ it := &BackupIterator{}
+ // Clone so per-page mutations of PageToken/PageSize don't leak to the caller.
+ req = proto.Clone(req).(*databasepb.ListBackupsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Backup, string, error) {
+ resp := &databasepb.ListBackupsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ // Clamp to the int32 range of the proto field.
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/backups", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ // NOTE(review): unlike the unary methods above, the request is not
+ // bound to ctx via WithContext here — confirm this matches the
+ // generator's intent.
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetBackups(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// RestoreDatabase create a new database by restoring from a completed backup. The new
+// database must be in the same project and in an instance with the same
+// instance configuration as the instance containing
+// the backup. The returned database [long-running
+// operation][google.longrunning.Operation] has a name of the format
+// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>,
+// and can be used to track the progress of the operation, and to cancel it.
+// The metadata field type is
+// RestoreDatabaseMetadata.
+// The response type
+// is Database, if
+// successful. Cancelling the returned operation will stop the restore and
+// delete the database.
+// There can be only one database being restored into an instance at a time.
+// Once the restore operation completes, a new restore operation can be
+// initiated, without waiting for the optimize operation associated with the
+// first restore to complete.
+func (c *databaseAdminRESTClient) RestoreDatabase(ctx context.Context, req *databasepb.RestoreDatabaseRequest, opts ...gax.CallOption) (*RestoreDatabaseOperation, error) {
+ // Serialize the whole request message as the JSON request body.
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/databases:restore", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+
+ // Poll the returned long-running operation at its resource name under /v1.
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &RestoreDatabaseOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ pollPath: override,
+ }, nil
+}
+
+// ListDatabaseOperations lists database [longrunning-operations][google.longrunning.Operation].
+// A database operation has a name of the form
+// projects/<project>/instances/<instance>/databases/<database>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations.
+func (c *databaseAdminRESTClient) ListDatabaseOperations(ctx context.Context, req *databasepb.ListDatabaseOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ it := &OperationIterator{}
+ // Clone so per-page mutations of PageToken/PageSize don't leak to the caller.
+ req = proto.Clone(req).(*databasepb.ListDatabaseOperationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+ resp := &databasepb.ListDatabaseOperationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ // Clamp to the int32 range of the proto field.
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/databaseOperations", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ // NOTE(review): the request is not bound to ctx via WithContext
+ // here, unlike the unary methods — confirm intended.
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// ListBackupOperations lists the backup [long-running operations][google.longrunning.Operation] in
+// the given instance. A backup operation has a name of the form
+// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations. Operations returned are ordered by
+// operation.metadata.value.progress.start_time in descending order starting
+// from the most recently started operation.
+func (c *databaseAdminRESTClient) ListBackupOperations(ctx context.Context, req *databasepb.ListBackupOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ it := &OperationIterator{}
+ // Clone so per-page mutations of PageToken/PageSize don't leak to the caller.
+ req = proto.Clone(req).(*databasepb.ListBackupOperationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+ resp := &databasepb.ListBackupOperationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ // Clamp to the int32 range of the proto field.
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/backupOperations", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ // NOTE(review): the request is not bound to ctx via WithContext
+ // here, unlike the unary methods — confirm intended.
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// ListDatabaseRoles lists Cloud Spanner database roles.
+//
+// The returned iterator lazily pages through GET /v1/{parent}/databaseRoles.
+// The request is cloned up front so page-token/page-size mutations made while
+// iterating never leak back into the caller's request.
+func (c *databaseAdminRESTClient) ListDatabaseRoles(ctx context.Context, req *databasepb.ListDatabaseRolesRequest, opts ...gax.CallOption) *DatabaseRoleIterator {
+	it := &DatabaseRoleIterator{}
+	req = proto.Clone(req).(*databasepb.ListDatabaseRolesRequest)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.DatabaseRole, string, error) {
+		resp := &databasepb.ListDatabaseRolesResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		baseUrl, err := url.Parse(c.endpoint)
+		if err != nil {
+			return nil, "", err
+		}
+		baseUrl.Path += fmt.Sprintf("/v1/%v/databaseRoles", req.GetParent())
+
+		params := url.Values{}
+		// $alt selects JSON transcoding with enums encoded as integers.
+		params.Add("$alt", "json;enum-encoding=int")
+		if req.GetPageSize() != 0 {
+			params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+		}
+		if req.GetPageToken() != "" {
+			params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+		}
+
+		baseUrl.RawQuery = params.Encode()
+
+		// Build HTTP headers from client and context metadata.
+		hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+		headers := gax.BuildHeaders(ctx, hds...)
+		e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			if settings.Path != "" {
+				baseUrl.Path = settings.Path
+			}
+			httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+			if err != nil {
+				return err
+			}
+			// Propagate the caller's deadline/cancellation to the HTTP
+			// transport, matching the unary methods in this file; without
+			// this the request can outlive ctx.
+			httpReq = httpReq.WithContext(ctx)
+			httpReq.Header = headers
+
+			httpRsp, err := c.httpClient.Do(httpReq)
+			if err != nil {
+				return err
+			}
+			defer httpRsp.Body.Close()
+
+			// Non-2xx statuses are surfaced as *googleapi.Error.
+			if err = googleapi.CheckResponse(httpRsp); err != nil {
+				return err
+			}
+
+			buf, err := io.ReadAll(httpRsp.Body)
+			if err != nil {
+				return err
+			}
+
+			if err := unm.Unmarshal(buf, resp); err != nil {
+				return err
+			}
+
+			return nil
+		}, opts...)
+		if e != nil {
+			return nil, "", e
+		}
+		it.Response = resp
+		return resp.GetDatabaseRoles(), resp.GetNextPageToken(), nil
+	}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+// CreateBackupSchedule creates a new backup schedule.
+//
+// Per the RPC's HTTP mapping, only the BackupSchedule message is sent as the
+// JSON body; the parent resource travels in the URL path and the
+// user-chosen schedule ID in the query string.
+func (c *databaseAdminRESTClient) CreateBackupSchedule(ctx context.Context, req *databasepb.CreateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
+	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+	body := req.GetBackupSchedule()
+	jsonReq, err := m.Marshal(body)
+	if err != nil {
+		return nil, err
+	}
+
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v/backupSchedules", req.GetParent())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+	params.Add("backupScheduleId", fmt.Sprintf("%v", req.GetBackupScheduleId()))
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	// The full slice expression copies the default call options so appending
+	// the caller's opts cannot mutate the shared defaults.
+	opts = append((*c.CallOptions).CreateBackupSchedule[0:len((*c.CallOptions).CreateBackupSchedule):len((*c.CallOptions).CreateBackupSchedule)], opts...)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	resp := &databasepb.BackupSchedule{}
+	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path.
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Non-2xx statuses are surfaced as *googleapi.Error.
+		if err = googleapi.CheckResponse(httpRsp); err != nil {
+			return err
+		}
+
+		buf, err := io.ReadAll(httpRsp.Body)
+		if err != nil {
+			return err
+		}
+
+		if err := unm.Unmarshal(buf, resp); err != nil {
+			return err
+		}
+
+		return nil
+	}, opts...)
+	if e != nil {
+		return nil, e
+	}
+	return resp, nil
+}
+
+// GetBackupSchedule gets backup schedule for the input schedule name.
+//
+// Issues GET /v1/{name} and decodes the JSON response into a
+// databasepb.BackupSchedule, with retries managed by gax.Invoke.
+func (c *databaseAdminRESTClient) GetBackupSchedule(ctx context.Context, req *databasepb.GetBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	// Full slice expression: copy the default call options so appending the
+	// caller's opts cannot mutate the shared defaults.
+	opts = append((*c.CallOptions).GetBackupSchedule[0:len((*c.CallOptions).GetBackupSchedule):len((*c.CallOptions).GetBackupSchedule)], opts...)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	resp := &databasepb.BackupSchedule{}
+	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path.
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Non-2xx statuses are surfaced as *googleapi.Error.
+		if err = googleapi.CheckResponse(httpRsp); err != nil {
+			return err
+		}
+
+		buf, err := io.ReadAll(httpRsp.Body)
+		if err != nil {
+			return err
+		}
+
+		if err := unm.Unmarshal(buf, resp); err != nil {
+			return err
+		}
+
+		return nil
+	}, opts...)
+	if e != nil {
+		return nil, e
+	}
+	return resp, nil
+}
+
+// UpdateBackupSchedule updates a backup schedule.
+//
+// Sends PATCH /v1/{backup_schedule.name} with the BackupSchedule as the JSON
+// body and the optional FieldMask as an `updateMask` query parameter.
+func (c *databaseAdminRESTClient) UpdateBackupSchedule(ctx context.Context, req *databasepb.UpdateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
+	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+	body := req.GetBackupSchedule()
+	jsonReq, err := m.Marshal(body)
+	if err != nil {
+		return nil, err
+	}
+
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetBackupSchedule().GetName())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+	if req.GetUpdateMask() != nil {
+		updateMask, err := protojson.Marshal(req.GetUpdateMask())
+		if err != nil {
+			return nil, err
+		}
+		// protojson renders a FieldMask as a quoted JSON string; strip the
+		// surrounding quotes before placing it in the query string.
+		params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
+	}
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup_schedule.name", url.QueryEscape(req.GetBackupSchedule().GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	// Full slice expression copies the default call options so appending the
+	// caller's opts cannot mutate the shared defaults.
+	opts = append((*c.CallOptions).UpdateBackupSchedule[0:len((*c.CallOptions).UpdateBackupSchedule):len((*c.CallOptions).UpdateBackupSchedule)], opts...)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	resp := &databasepb.BackupSchedule{}
+	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path.
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Non-2xx statuses are surfaced as *googleapi.Error.
+		if err = googleapi.CheckResponse(httpRsp); err != nil {
+			return err
+		}
+
+		buf, err := io.ReadAll(httpRsp.Body)
+		if err != nil {
+			return err
+		}
+
+		if err := unm.Unmarshal(buf, resp); err != nil {
+			return err
+		}
+
+		return nil
+	}, opts...)
+	if e != nil {
+		return nil, e
+	}
+	return resp, nil
+}
+
+// DeleteBackupSchedule deletes a backup schedule.
+//
+// Issues DELETE /v1/{name}; a nil return means the server answered with a
+// success status. No response body is decoded.
+func (c *databaseAdminRESTClient) DeleteBackupSchedule(ctx context.Context, req *databasepb.DeleteBackupScheduleRequest, opts ...gax.CallOption) error {
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path.
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Returns nil if there is no error, otherwise wraps
+		// the response code and body into a non-nil error
+		return googleapi.CheckResponse(httpRsp)
+	}, opts...)
+}
+
+// ListBackupSchedules lists all the backup schedules for the database.
+//
+// The returned iterator lazily pages through GET /v1/{parent}/backupSchedules.
+// The request is cloned up front so page-token/page-size mutations made while
+// iterating never leak back into the caller's request.
+func (c *databaseAdminRESTClient) ListBackupSchedules(ctx context.Context, req *databasepb.ListBackupSchedulesRequest, opts ...gax.CallOption) *BackupScheduleIterator {
+	it := &BackupScheduleIterator{}
+	req = proto.Clone(req).(*databasepb.ListBackupSchedulesRequest)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.BackupSchedule, string, error) {
+		resp := &databasepb.ListBackupSchedulesResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		baseUrl, err := url.Parse(c.endpoint)
+		if err != nil {
+			return nil, "", err
+		}
+		baseUrl.Path += fmt.Sprintf("/v1/%v/backupSchedules", req.GetParent())
+
+		params := url.Values{}
+		// $alt selects JSON transcoding with enums encoded as integers.
+		params.Add("$alt", "json;enum-encoding=int")
+		if req.GetPageSize() != 0 {
+			params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+		}
+		if req.GetPageToken() != "" {
+			params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+		}
+
+		baseUrl.RawQuery = params.Encode()
+
+		// Build HTTP headers from client and context metadata.
+		hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+		headers := gax.BuildHeaders(ctx, hds...)
+		e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			if settings.Path != "" {
+				baseUrl.Path = settings.Path
+			}
+			httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+			if err != nil {
+				return err
+			}
+			// Propagate the caller's deadline/cancellation to the HTTP
+			// transport, matching the unary methods in this file; without
+			// this the request can outlive ctx.
+			httpReq = httpReq.WithContext(ctx)
+			httpReq.Header = headers
+
+			httpRsp, err := c.httpClient.Do(httpReq)
+			if err != nil {
+				return err
+			}
+			defer httpRsp.Body.Close()
+
+			// Non-2xx statuses are surfaced as *googleapi.Error.
+			if err = googleapi.CheckResponse(httpRsp); err != nil {
+				return err
+			}
+
+			buf, err := io.ReadAll(httpRsp.Body)
+			if err != nil {
+				return err
+			}
+
+			if err := unm.Unmarshal(buf, resp); err != nil {
+				return err
+			}
+
+			return nil
+		}, opts...)
+		if e != nil {
+			return nil, "", e
+		}
+		it.Response = resp
+		return resp.GetBackupSchedules(), resp.GetNextPageToken(), nil
+	}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+// CancelOperation is a utility method from google.longrunning.Operations.
+//
+// Issues POST /v1/{name}:cancel with an empty body; a nil return means the
+// server accepted the cancellation request.
+func (c *databaseAdminRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path.
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Returns nil if there is no error, otherwise wraps
+		// the response code and body into a non-nil error
+		return googleapi.CheckResponse(httpRsp)
+	}, opts...)
+}
+
+// DeleteOperation is a utility method from google.longrunning.Operations.
+//
+// Issues DELETE /v1/{name}; a nil return means the server responded with a
+// success status. No response body is decoded.
+func (c *databaseAdminRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path.
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Returns nil if there is no error, otherwise wraps
+		// the response code and body into a non-nil error
+		return googleapi.CheckResponse(httpRsp)
+	}, opts...)
+}
+
+// GetOperation is a utility method from google.longrunning.Operations.
+//
+// Issues GET /v1/{name} and decodes the JSON response into a
+// longrunningpb.Operation, with retries managed by gax.Invoke.
+func (c *databaseAdminRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
+	baseUrl, err := url.Parse(c.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+	params := url.Values{}
+	// $alt selects JSON transcoding with enums encoded as integers.
+	params.Add("$alt", "json;enum-encoding=int")
+
+	baseUrl.RawQuery = params.Encode()
+
+	// Build HTTP headers from client and context metadata.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	hds = append(hds, "Content-Type", "application/json")
+	headers := gax.BuildHeaders(ctx, hds...)
+	// Full slice expression copies the default call options so appending the
+	// caller's opts cannot mutate the shared defaults.
+	opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	resp := &longrunningpb.Operation{}
+	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		// Per-call settings may override the request path (used when polling
+		// an operation whose REST path differs from the default).
+		if settings.Path != "" {
+			baseUrl.Path = settings.Path
+		}
+		httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+		if err != nil {
+			return err
+		}
+		httpReq = httpReq.WithContext(ctx)
+		httpReq.Header = headers
+
+		httpRsp, err := c.httpClient.Do(httpReq)
+		if err != nil {
+			return err
+		}
+		defer httpRsp.Body.Close()
+
+		// Non-2xx statuses are surfaced as *googleapi.Error.
+		if err = googleapi.CheckResponse(httpRsp); err != nil {
+			return err
+		}
+
+		buf, err := io.ReadAll(httpRsp.Body)
+		if err != nil {
+			return err
+		}
+
+		if err := unm.Unmarshal(buf, resp); err != nil {
+			return err
+		}
+
+		return nil
+	}, opts...)
+	if e != nil {
+		return nil, e
+	}
+	return resp, nil
+}
+
+// ListOperations is a utility method from google.longrunning.Operations.
+//
+// The returned iterator lazily pages through GET /v1/{name}. The request is
+// cloned up front so page-token/page-size mutations made while iterating
+// never leak back into the caller's request.
+func (c *databaseAdminRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+	it := &OperationIterator{}
+	req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
+	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+		resp := &longrunningpb.ListOperationsResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		baseUrl, err := url.Parse(c.endpoint)
+		if err != nil {
+			return nil, "", err
+		}
+		baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+		params := url.Values{}
+		// $alt selects JSON transcoding with enums encoded as integers.
+		params.Add("$alt", "json;enum-encoding=int")
+		if req.GetFilter() != "" {
+			params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+		}
+		if req.GetPageSize() != 0 {
+			params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+		}
+		if req.GetPageToken() != "" {
+			params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+		}
+
+		baseUrl.RawQuery = params.Encode()
+
+		// Build HTTP headers from client and context metadata.
+		hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+		headers := gax.BuildHeaders(ctx, hds...)
+		e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			if settings.Path != "" {
+				baseUrl.Path = settings.Path
+			}
+			httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+			if err != nil {
+				return err
+			}
+			// Propagate the caller's deadline/cancellation to the HTTP
+			// transport, matching the unary methods in this file; without
+			// this the request can outlive ctx.
+			httpReq = httpReq.WithContext(ctx)
+			httpReq.Header = headers
+
+			httpRsp, err := c.httpClient.Do(httpReq)
+			if err != nil {
+				return err
+			}
+			defer httpRsp.Body.Close()
+
+			// Non-2xx statuses are surfaced as *googleapi.Error.
+			if err = googleapi.CheckResponse(httpRsp); err != nil {
+				return err
+			}
+
+			buf, err := io.ReadAll(httpRsp.Body)
+			if err != nil {
+				return err
+			}
+
+			if err := unm.Unmarshal(buf, resp); err != nil {
+				return err
+			}
+
+			return nil
+		}, opts...)
+		if e != nil {
+			return nil, "", e
+		}
+		it.Response = resp
+		return resp.GetOperations(), resp.GetNextPageToken(), nil
+	}
+
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+// CopyBackupOperation returns a new CopyBackupOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process.
+func (c *databaseAdminGRPCClient) CopyBackupOperation(name string) *CopyBackupOperation {
+	op := &longrunningpb.Operation{Name: name}
+	return &CopyBackupOperation{lro: longrunning.InternalNewOperation(*c.LROClient, op)}
+}
+
+// CopyBackupOperation returns a new CopyBackupOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process; the REST client also records the /v1 polling path.
+func (c *databaseAdminRESTClient) CopyBackupOperation(name string) *CopyBackupOperation {
+	return &CopyBackupOperation{
+		lro:      longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+		pollPath: fmt.Sprintf("/v1/%s", name),
+	}
+}
+
+// CreateBackupOperation returns a new CreateBackupOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process.
+func (c *databaseAdminGRPCClient) CreateBackupOperation(name string) *CreateBackupOperation {
+	op := &longrunningpb.Operation{Name: name}
+	return &CreateBackupOperation{lro: longrunning.InternalNewOperation(*c.LROClient, op)}
+}
+
+// CreateBackupOperation returns a new CreateBackupOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process; the REST client also records the /v1 polling path.
+func (c *databaseAdminRESTClient) CreateBackupOperation(name string) *CreateBackupOperation {
+	return &CreateBackupOperation{
+		lro:      longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+		pollPath: fmt.Sprintf("/v1/%s", name),
+	}
+}
+
+// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process.
+func (c *databaseAdminGRPCClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation {
+	op := &longrunningpb.Operation{Name: name}
+	return &CreateDatabaseOperation{lro: longrunning.InternalNewOperation(*c.LROClient, op)}
+}
+
+// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process; the REST client also records the /v1 polling path.
+func (c *databaseAdminRESTClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation {
+	return &CreateDatabaseOperation{
+		lro:      longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+		pollPath: fmt.Sprintf("/v1/%s", name),
+	}
+}
+
+// RestoreDatabaseOperation returns a new RestoreDatabaseOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process.
+func (c *databaseAdminGRPCClient) RestoreDatabaseOperation(name string) *RestoreDatabaseOperation {
+	op := &longrunningpb.Operation{Name: name}
+	return &RestoreDatabaseOperation{lro: longrunning.InternalNewOperation(*c.LROClient, op)}
+}
+
+// RestoreDatabaseOperation returns a new RestoreDatabaseOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process; the REST client also records the /v1 polling path.
+func (c *databaseAdminRESTClient) RestoreDatabaseOperation(name string) *RestoreDatabaseOperation {
+	return &RestoreDatabaseOperation{
+		lro:      longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+		pollPath: fmt.Sprintf("/v1/%s", name),
+	}
+}
+
+// UpdateDatabaseOperation returns a new UpdateDatabaseOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process.
+func (c *databaseAdminGRPCClient) UpdateDatabaseOperation(name string) *UpdateDatabaseOperation {
+	op := &longrunningpb.Operation{Name: name}
+	return &UpdateDatabaseOperation{lro: longrunning.InternalNewOperation(*c.LROClient, op)}
+}
+
+// UpdateDatabaseOperation returns a new UpdateDatabaseOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process; the REST client also records the /v1 polling path.
+func (c *databaseAdminRESTClient) UpdateDatabaseOperation(name string) *UpdateDatabaseOperation {
+	return &UpdateDatabaseOperation{
+		lro:      longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+		pollPath: fmt.Sprintf("/v1/%s", name),
+	}
+}
+
+// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process.
+func (c *databaseAdminGRPCClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation {
+	op := &longrunningpb.Operation{Name: name}
+	return &UpdateDatabaseDdlOperation{lro: longrunning.InternalNewOperation(*c.LROClient, op)}
+}
+
+// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name.
+// Use it to resume polling an operation started earlier, possibly by a
+// different process; the REST client also records the /v1 polling path.
+func (c *databaseAdminRESTClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation {
+	return &UpdateDatabaseDdlOperation{
+		lro:      longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+		pollPath: fmt.Sprintf("/v1/%s", name),
+	}
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go
new file mode 100644
index 000000000..7a5db895c
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go
@@ -0,0 +1,2446 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
+// source: google/spanner/admin/database/v1/backup.proto
+
+package databasepb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Compile-time guards: the build fails if the generated code and the
+// protoimpl runtime fall outside each other's supported version window.
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates the current state of the backup.
+//
+// Backup_State is a protoc-generated enum mirroring
+// google.spanner.admin.database.v1.Backup.State.
+type Backup_State int32
+
+const (
+	// Not specified.
+	Backup_STATE_UNSPECIFIED Backup_State = 0
+	// The pending backup is still being created. Operations on the
+	// backup may fail with `FAILED_PRECONDITION` in this state.
+	Backup_CREATING Backup_State = 1
+	// The backup is complete and ready for use.
+	Backup_READY Backup_State = 2
+)
+
+// Enum value maps for Backup_State.
+var (
+	Backup_State_name = map[int32]string{
+		0: "STATE_UNSPECIFIED",
+		1: "CREATING",
+		2: "READY",
+	}
+	Backup_State_value = map[string]int32{
+		"STATE_UNSPECIFIED": 0,
+		"CREATING":          1,
+		"READY":             2,
+	}
+)
+
+// Enum returns a new pointer whose value is x.
+func (x Backup_State) Enum() *Backup_State {
+	p := new(Backup_State)
+	*p = x
+	return p
+}
+
+// String renders x using its registered enum descriptor.
+func (x Backup_State) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+// Descriptor returns the protoreflect descriptor for Backup_State.
+func (Backup_State) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_spanner_admin_database_v1_backup_proto_enumTypes[0].Descriptor()
+}
+
+// Type returns the protoreflect type for Backup_State.
+func (Backup_State) Type() protoreflect.EnumType {
+	return &file_google_spanner_admin_database_v1_backup_proto_enumTypes[0]
+}
+
+// Number returns x's protobuf wire number.
+func (x Backup_State) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Backup_State.Descriptor instead.
+func (Backup_State) EnumDescriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Encryption types for the backup.
+//
+// CreateBackupEncryptionConfig_EncryptionType is a protoc-generated enum
+// mirroring CreateBackupEncryptionConfig.EncryptionType.
+type CreateBackupEncryptionConfig_EncryptionType int32
+
+const (
+	// Unspecified. Do not use.
+	CreateBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED CreateBackupEncryptionConfig_EncryptionType = 0
+	// Use the same encryption configuration as the database. This is the
+	// default option when
+	// [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
+	// is empty. For example, if the database is using
+	// `Customer_Managed_Encryption`, the backup will be using the same Cloud
+	// KMS key as the database.
+	CreateBackupEncryptionConfig_USE_DATABASE_ENCRYPTION CreateBackupEncryptionConfig_EncryptionType = 1
+	// Use Google default encryption.
+	CreateBackupEncryptionConfig_GOOGLE_DEFAULT_ENCRYPTION CreateBackupEncryptionConfig_EncryptionType = 2
+	// Use customer managed encryption. If specified, `kms_key_name`
+	// must contain a valid Cloud KMS key.
+	CreateBackupEncryptionConfig_CUSTOMER_MANAGED_ENCRYPTION CreateBackupEncryptionConfig_EncryptionType = 3
+)
+
+// Enum value maps for CreateBackupEncryptionConfig_EncryptionType.
+var (
+	CreateBackupEncryptionConfig_EncryptionType_name = map[int32]string{
+		0: "ENCRYPTION_TYPE_UNSPECIFIED",
+		1: "USE_DATABASE_ENCRYPTION",
+		2: "GOOGLE_DEFAULT_ENCRYPTION",
+		3: "CUSTOMER_MANAGED_ENCRYPTION",
+	}
+	CreateBackupEncryptionConfig_EncryptionType_value = map[string]int32{
+		"ENCRYPTION_TYPE_UNSPECIFIED": 0,
+		"USE_DATABASE_ENCRYPTION":     1,
+		"GOOGLE_DEFAULT_ENCRYPTION":   2,
+		"CUSTOMER_MANAGED_ENCRYPTION": 3,
+	}
+)
+
+// Enum returns a new pointer whose value is x.
+func (x CreateBackupEncryptionConfig_EncryptionType) Enum() *CreateBackupEncryptionConfig_EncryptionType {
+	p := new(CreateBackupEncryptionConfig_EncryptionType)
+	*p = x
+	return p
+}
+
+// String renders x using its registered enum descriptor.
+func (x CreateBackupEncryptionConfig_EncryptionType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+// Descriptor returns the protoreflect descriptor for this enum.
+func (CreateBackupEncryptionConfig_EncryptionType) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_spanner_admin_database_v1_backup_proto_enumTypes[1].Descriptor()
+}
+
+// Type returns the protoreflect type for this enum.
+func (CreateBackupEncryptionConfig_EncryptionType) Type() protoreflect.EnumType {
+	return &file_google_spanner_admin_database_v1_backup_proto_enumTypes[1]
+}
+
+// Number returns x's protobuf wire number.
+func (x CreateBackupEncryptionConfig_EncryptionType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CreateBackupEncryptionConfig_EncryptionType.Descriptor instead.
+func (CreateBackupEncryptionConfig_EncryptionType) EnumDescriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{13, 0}
+}
+
+// Encryption types for the backup.
+//
+// CopyBackupEncryptionConfig_EncryptionType is a protoc-generated enum
+// mirroring CopyBackupEncryptionConfig.EncryptionType.
+type CopyBackupEncryptionConfig_EncryptionType int32
+
+const (
+	// Unspecified. Do not use.
+	CopyBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED CopyBackupEncryptionConfig_EncryptionType = 0
+	// This is the default option for
+	// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+	// when
+	// [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
+	// is not specified. For example, if the source backup is using
+	// `Customer_Managed_Encryption`, the backup will be using the same Cloud
+	// KMS key as the source backup.
+	CopyBackupEncryptionConfig_USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION CopyBackupEncryptionConfig_EncryptionType = 1
+	// Use Google default encryption.
+	CopyBackupEncryptionConfig_GOOGLE_DEFAULT_ENCRYPTION CopyBackupEncryptionConfig_EncryptionType = 2
+	// Use customer managed encryption. If specified, either `kms_key_name` or
+	// `kms_key_names` must contain valid Cloud KMS key(s).
+	CopyBackupEncryptionConfig_CUSTOMER_MANAGED_ENCRYPTION CopyBackupEncryptionConfig_EncryptionType = 3
+)
+
+// Enum value maps for CopyBackupEncryptionConfig_EncryptionType.
+var (
+	CopyBackupEncryptionConfig_EncryptionType_name = map[int32]string{
+		0: "ENCRYPTION_TYPE_UNSPECIFIED",
+		1: "USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION",
+		2: "GOOGLE_DEFAULT_ENCRYPTION",
+		3: "CUSTOMER_MANAGED_ENCRYPTION",
+	}
+	CopyBackupEncryptionConfig_EncryptionType_value = map[string]int32{
+		"ENCRYPTION_TYPE_UNSPECIFIED":             0,
+		"USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION": 1,
+		"GOOGLE_DEFAULT_ENCRYPTION":               2,
+		"CUSTOMER_MANAGED_ENCRYPTION":             3,
+	}
+)
+
+// Enum returns a new pointer whose value is x.
+func (x CopyBackupEncryptionConfig_EncryptionType) Enum() *CopyBackupEncryptionConfig_EncryptionType {
+	p := new(CopyBackupEncryptionConfig_EncryptionType)
+	*p = x
+	return p
+}
+
+// String renders x using its registered enum descriptor.
+func (x CopyBackupEncryptionConfig_EncryptionType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+// Descriptor returns the protoreflect descriptor for this enum.
+func (CopyBackupEncryptionConfig_EncryptionType) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_spanner_admin_database_v1_backup_proto_enumTypes[2].Descriptor()
+}
+
+// Type returns the protoreflect type for this enum.
+func (CopyBackupEncryptionConfig_EncryptionType) Type() protoreflect.EnumType {
+	return &file_google_spanner_admin_database_v1_backup_proto_enumTypes[2]
+}
+
+// Number returns x's protobuf wire number.
+func (x CopyBackupEncryptionConfig_EncryptionType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CopyBackupEncryptionConfig_EncryptionType.Descriptor instead.
+func (CopyBackupEncryptionConfig_EncryptionType) EnumDescriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{14, 0}
+}
+
+// A backup of a Cloud Spanner database.
+type Backup struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. Name of the database from which this backup was created. This
+ // needs to be in the same instance as the backup. Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`.
+ Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"`
+ // The backup will contain an externally consistent copy of the database at
+ // the timestamp specified by `version_time`. If `version_time` is not
+ // specified, the system will set `version_time` to the `create_time` of the
+ // backup.
+ VersionTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=version_time,json=versionTime,proto3" json:"version_time,omitempty"`
+ // Required for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. The expiration time of the backup, with microseconds
+ // granularity that must be at least 6 hours and at most 366 days
+ // from the time the CreateBackup request is processed. Once the `expire_time`
+ // has passed, the backup is eligible to be automatically deleted by Cloud
+ // Spanner to free the resources used by the backup.
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+ // Output only for the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation. Required for the
+ // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
+ // operation.
+ //
+ // A globally unique identifier for the backup which cannot be
+ // changed. Values are of the form
+ // `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
+ // The final segment of the name must be between 2 and 60 characters
+ // in length.
+ //
+ // The backup is stored in the location(s) specified in the instance
+ // configuration of the instance containing the backup, identified
+ // by the prefix of the backup name of the form
+ // `projects/<project>/instances/<instance>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Output only. The time the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // request is received. If the request does not specify `version_time`, the
+ // `version_time` of the backup will be equivalent to the `create_time`.
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ // Output only. Size of the backup in bytes.
+ SizeBytes int64 `protobuf:"varint,5,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
+ // Output only. The number of bytes that will be freed by deleting this
+ // backup. This value will be zero if, for example, this backup is part of an
+ // incremental backup chain and younger backups in the chain require that we
+ // keep its data. For backups not in an incremental backup chain, this is
+ // always the size of the backup. This value may change if backups on the same
+ // chain get created, deleted or expired.
+ FreeableSizeBytes int64 `protobuf:"varint,15,opt,name=freeable_size_bytes,json=freeableSizeBytes,proto3" json:"freeable_size_bytes,omitempty"`
+ // Output only. For a backup in an incremental backup chain, this is the
+ // storage space needed to keep the data that has changed since the previous
+ // backup. For all other backups, this is always the size of the backup. This
+ // value may change if backups on the same chain get deleted or expired.
+ //
+ // This field can be used to calculate the total storage space used by a set
+ // of backups. For example, the total space used by all backups of a database
+ // can be computed by summing up this field.
+ ExclusiveSizeBytes int64 `protobuf:"varint,16,opt,name=exclusive_size_bytes,json=exclusiveSizeBytes,proto3" json:"exclusive_size_bytes,omitempty"`
+ // Output only. The current state of the backup.
+ State Backup_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.spanner.admin.database.v1.Backup_State" json:"state,omitempty"`
+ // Output only. The names of the restored databases that reference the backup.
+ // The database names are of
+ // the form `projects/<project>/instances/<instance>/databases/<database>`.
+ // Referencing databases may exist in different instances. The existence of
+ // any referencing database prevents the backup from being deleted. When a
+ // restored database from the backup enters the `READY` state, the reference
+ // to the backup is removed.
+ ReferencingDatabases []string `protobuf:"bytes,7,rep,name=referencing_databases,json=referencingDatabases,proto3" json:"referencing_databases,omitempty"`
+ // Output only. The encryption information for the backup.
+ EncryptionInfo *EncryptionInfo `protobuf:"bytes,8,opt,name=encryption_info,json=encryptionInfo,proto3" json:"encryption_info,omitempty"`
+ // Output only. The encryption information for the backup, whether it is
+ // protected by one or more KMS keys. The information includes all Cloud
+ // KMS key versions used to encrypt the backup. The `encryption_status' field
+ // inside of each `EncryptionInfo` is not populated. At least one of the key
+ // versions must be available for the backup to be restored. If a key version
+ // is revoked in the middle of a restore, the restore behavior is undefined.
+ EncryptionInformation []*EncryptionInfo `protobuf:"bytes,13,rep,name=encryption_information,json=encryptionInformation,proto3" json:"encryption_information,omitempty"`
+ // Output only. The database dialect information for the backup.
+ DatabaseDialect DatabaseDialect `protobuf:"varint,10,opt,name=database_dialect,json=databaseDialect,proto3,enum=google.spanner.admin.database.v1.DatabaseDialect" json:"database_dialect,omitempty"`
+ // Output only. The names of the destination backups being created by copying
+ // this source backup. The backup names are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ // Referencing backups may exist in different instances. The existence of
+ // any referencing backup prevents the backup from being deleted. When the
+ // copy operation is done (either successfully completed or cancelled or the
+ // destination backup is deleted), the reference to the backup is removed.
+ ReferencingBackups []string `protobuf:"bytes,11,rep,name=referencing_backups,json=referencingBackups,proto3" json:"referencing_backups,omitempty"`
+ // Output only. The max allowed expiration time of the backup, with
+ // microseconds granularity. A backup's expiration time can be configured in
+ // multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
+ // copying an existing backup, the expiration time specified must be
+ // less than `Backup.max_expire_time`.
+ MaxExpireTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=max_expire_time,json=maxExpireTime,proto3" json:"max_expire_time,omitempty"`
+ // Output only. List of backup schedule URIs that are associated with
+ // creating this backup. This is only applicable for scheduled backups, and
+ // is empty for on-demand backups.
+ //
+ // To optimize for storage, whenever possible, multiple schedules are
+ // collapsed together to create one backup. In such cases, this field captures
+ // the list of all backup schedule URIs that are associated with creating
+ // this backup. If collapsing is not done, then this field captures the
+ // single backup schedule URI associated with creating this backup.
+ BackupSchedules []string `protobuf:"bytes,14,rep,name=backup_schedules,json=backupSchedules,proto3" json:"backup_schedules,omitempty"`
+ // Output only. Populated only for backups in an incremental backup chain.
+ // Backups share the same chain id if and only if they belong to the same
+ // incremental backup chain. Use this field to determine which backups are
+ // part of the same incremental backup chain. The ordering of backups in the
+ // chain can be determined by ordering the backup `version_time`.
+ IncrementalBackupChainId string `protobuf:"bytes,17,opt,name=incremental_backup_chain_id,json=incrementalBackupChainId,proto3" json:"incremental_backup_chain_id,omitempty"`
+ // Output only. Data deleted at a time older than this is guaranteed not to be
+ // retained in order to support this backup. For a backup in an incremental
+ // backup chain, this is the version time of the oldest backup that exists or
+ // ever existed in the chain. For all other backups, this is the version time
+ // of the backup. This field can be used to understand what data is being
+ // retained by the backup system.
+ OldestVersionTime *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=oldest_version_time,json=oldestVersionTime,proto3" json:"oldest_version_time,omitempty"`
+}
+
+// Reset clears x to its zero value. When the unsafe fast path is enabled,
+// it also re-binds the fresh message state to this message's type info
+// (slot 0 of the generated message-type table).
+func (x *Backup) Reset() {
+ *x = Backup{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *Backup) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *Backup as a protobuf message.
+func (*Backup) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *Backup) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Backup.ProtoReflect.Descriptor instead.
+func (*Backup) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{0}
+}
+
+// The getters below are nil-safe: calling them on a nil *Backup returns the
+// field's zero value instead of panicking.
+
+func (x *Backup) GetDatabase() string {
+ if x != nil {
+ return x.Database
+ }
+ return ""
+}
+
+func (x *Backup) GetVersionTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.VersionTime
+ }
+ return nil
+}
+
+func (x *Backup) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+func (x *Backup) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Backup) GetCreateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreateTime
+ }
+ return nil
+}
+
+func (x *Backup) GetSizeBytes() int64 {
+ if x != nil {
+ return x.SizeBytes
+ }
+ return 0
+}
+
+func (x *Backup) GetFreeableSizeBytes() int64 {
+ if x != nil {
+ return x.FreeableSizeBytes
+ }
+ return 0
+}
+
+func (x *Backup) GetExclusiveSizeBytes() int64 {
+ if x != nil {
+ return x.ExclusiveSizeBytes
+ }
+ return 0
+}
+
+func (x *Backup) GetState() Backup_State {
+ if x != nil {
+ return x.State
+ }
+ return Backup_STATE_UNSPECIFIED
+}
+
+func (x *Backup) GetReferencingDatabases() []string {
+ if x != nil {
+ return x.ReferencingDatabases
+ }
+ return nil
+}
+
+func (x *Backup) GetEncryptionInfo() *EncryptionInfo {
+ if x != nil {
+ return x.EncryptionInfo
+ }
+ return nil
+}
+
+func (x *Backup) GetEncryptionInformation() []*EncryptionInfo {
+ if x != nil {
+ return x.EncryptionInformation
+ }
+ return nil
+}
+
+func (x *Backup) GetDatabaseDialect() DatabaseDialect {
+ if x != nil {
+ return x.DatabaseDialect
+ }
+ return DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED
+}
+
+func (x *Backup) GetReferencingBackups() []string {
+ if x != nil {
+ return x.ReferencingBackups
+ }
+ return nil
+}
+
+func (x *Backup) GetMaxExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.MaxExpireTime
+ }
+ return nil
+}
+
+func (x *Backup) GetBackupSchedules() []string {
+ if x != nil {
+ return x.BackupSchedules
+ }
+ return nil
+}
+
+func (x *Backup) GetIncrementalBackupChainId() string {
+ if x != nil {
+ return x.IncrementalBackupChainId
+ }
+ return ""
+}
+
+func (x *Backup) GetOldestVersionTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.OldestVersionTime
+ }
+ return nil
+}
+
+// The request for
+// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+type CreateBackupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the instance in which the backup will be
+ // created. This must be the same instance that contains the database the
+ // backup will be created from. The backup will be stored in the
+ // location(s) specified in the instance configuration of this
+ // instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The id of the backup to be created. The `backup_id` appended to
+ // `parent` forms the full backup name of the form
+ // `projects/<project>/instances/<instance>/backups/<backup_id>`.
+ BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"`
+ // Required. The backup to create.
+ Backup *Backup `protobuf:"bytes,3,opt,name=backup,proto3" json:"backup,omitempty"`
+ // Optional. The encryption configuration used to encrypt the backup. If this
+ // field is not specified, the backup will use the same encryption
+ // configuration as the database by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
+ // = `USE_DATABASE_ENCRYPTION`.
+ EncryptionConfig *CreateBackupEncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 1).
+func (x *CreateBackupRequest) Reset() {
+ *x = CreateBackupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *CreateBackupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *CreateBackupRequest as a protobuf message.
+func (*CreateBackupRequest) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead.
+func (*CreateBackupRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{1}
+}
+
+// Nil-safe getters: a nil receiver yields the field's zero value.
+
+func (x *CreateBackupRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateBackupRequest) GetBackupId() string {
+ if x != nil {
+ return x.BackupId
+ }
+ return ""
+}
+
+func (x *CreateBackupRequest) GetBackup() *Backup {
+ if x != nil {
+ return x.Backup
+ }
+ return nil
+}
+
+func (x *CreateBackupRequest) GetEncryptionConfig() *CreateBackupEncryptionConfig {
+ if x != nil {
+ return x.EncryptionConfig
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
+type CreateBackupMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the backup being created.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The name of the database the backup is created from.
+ Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"`
+ // The progress of the
+ // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
+ // operation.
+ Progress *OperationProgress `protobuf:"bytes,3,opt,name=progress,proto3" json:"progress,omitempty"`
+ // The time at which cancellation of this operation was received.
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ // starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not guaranteed.
+ // Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 2).
+func (x *CreateBackupMetadata) Reset() {
+ *x = CreateBackupMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *CreateBackupMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *CreateBackupMetadata as a protobuf message.
+func (*CreateBackupMetadata) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateBackupMetadata.ProtoReflect.Descriptor instead.
+func (*CreateBackupMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{2}
+}
+
+// Nil-safe getters: a nil receiver yields the field's zero value.
+
+func (x *CreateBackupMetadata) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateBackupMetadata) GetDatabase() string {
+ if x != nil {
+ return x.Database
+ }
+ return ""
+}
+
+func (x *CreateBackupMetadata) GetProgress() *OperationProgress {
+ if x != nil {
+ return x.Progress
+ }
+ return nil
+}
+
+func (x *CreateBackupMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+// The request for
+// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+type CopyBackupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the destination instance that will contain the backup
+ // copy. Values are of the form: `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The id of the backup copy.
+ // The `backup_id` appended to `parent` forms the full backup_uri of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"`
+ // Required. The source backup to be copied.
+ // The source backup needs to be in READY state for it to be copied.
+ // Once CopyBackup is in progress, the source backup cannot be deleted or
+ // cleaned up on expiration until CopyBackup is finished.
+ // Values are of the form:
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ SourceBackup string `protobuf:"bytes,3,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
+ // Required. The expiration time of the backup in microsecond granularity.
+ // The expiration time must be at least 6 hours and at most 366 days
+ // from the `create_time` of the source backup. Once the `expire_time` has
+ // passed, the backup is eligible to be automatically deleted by Cloud Spanner
+ // to free the resources used by the backup.
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+ // Optional. The encryption configuration used to encrypt the backup. If this
+ // field is not specified, the backup will use the same encryption
+ // configuration as the source backup by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
+ // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
+ EncryptionConfig *CopyBackupEncryptionConfig `protobuf:"bytes,5,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 3).
+func (x *CopyBackupRequest) Reset() {
+ *x = CopyBackupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *CopyBackupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *CopyBackupRequest as a protobuf message.
+func (*CopyBackupRequest) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *CopyBackupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyBackupRequest.ProtoReflect.Descriptor instead.
+func (*CopyBackupRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{3}
+}
+
+// Nil-safe getters: a nil receiver yields the field's zero value.
+
+func (x *CopyBackupRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CopyBackupRequest) GetBackupId() string {
+ if x != nil {
+ return x.BackupId
+ }
+ return ""
+}
+
+func (x *CopyBackupRequest) GetSourceBackup() string {
+ if x != nil {
+ return x.SourceBackup
+ }
+ return ""
+}
+
+func (x *CopyBackupRequest) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+func (x *CopyBackupRequest) GetEncryptionConfig() *CopyBackupEncryptionConfig {
+ if x != nil {
+ return x.EncryptionConfig
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
+type CopyBackupMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the backup being created through the copy operation.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The name of the source backup that is being copied.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ SourceBackup string `protobuf:"bytes,2,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
+ // The progress of the
+ // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
+ // operation.
+ Progress *OperationProgress `protobuf:"bytes,3,opt,name=progress,proto3" json:"progress,omitempty"`
+ // The time at which cancellation of CopyBackup operation was received.
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ // starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not guaranteed.
+ // Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1,
+ // corresponding to `Code.CANCELLED`.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 4).
+func (x *CopyBackupMetadata) Reset() {
+ *x = CopyBackupMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *CopyBackupMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *CopyBackupMetadata as a protobuf message.
+func (*CopyBackupMetadata) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *CopyBackupMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CopyBackupMetadata.ProtoReflect.Descriptor instead.
+func (*CopyBackupMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{4}
+}
+
+// Nil-safe getters: a nil receiver yields the field's zero value.
+
+func (x *CopyBackupMetadata) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CopyBackupMetadata) GetSourceBackup() string {
+ if x != nil {
+ return x.SourceBackup
+ }
+ return ""
+}
+
+func (x *CopyBackupMetadata) GetProgress() *OperationProgress {
+ if x != nil {
+ return x.Progress
+ }
+ return nil
+}
+
+func (x *CopyBackupMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+// The request for
+// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
+type UpdateBackupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The backup to update. `backup.name`, and the fields to be updated
+ // as specified by `update_mask` are required. Other fields are ignored.
+ // Update is only supported for the following fields:
+ // - `backup.expire_time`.
+ Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"`
+ // Required. A mask specifying which fields (e.g. `expire_time`) in the
+ // Backup resource should be updated. This mask is relative to the Backup
+ // resource, not to the request message. The field mask must always be
+ // specified; this prevents any future fields from being erased accidentally
+ // by clients that do not know about them.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 5).
+func (x *UpdateBackupRequest) Reset() {
+ *x = UpdateBackupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *UpdateBackupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *UpdateBackupRequest as a protobuf message.
+func (*UpdateBackupRequest) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateBackupRequest.ProtoReflect.Descriptor instead.
+func (*UpdateBackupRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{5}
+}
+
+// Nil-safe getters: a nil receiver yields the field's zero value.
+
+func (x *UpdateBackupRequest) GetBackup() *Backup {
+ if x != nil {
+ return x.Backup
+ }
+ return nil
+}
+
+func (x *UpdateBackupRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+// The request for
+// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
+type GetBackupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the backup.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 6).
+func (x *GetBackupRequest) Reset() {
+ *x = GetBackupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *GetBackupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *GetBackupRequest as a protobuf message.
+func (*GetBackupRequest) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *GetBackupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBackupRequest.ProtoReflect.Descriptor instead.
+func (*GetBackupRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{6}
+}
+
+// GetName returns the backup resource name; nil-safe (returns "" on nil).
+func (x *GetBackupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The request for
+// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
+type DeleteBackupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the backup to delete.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+// Reset clears x to its zero value and, on the unsafe fast path, re-binds
+// the message state to this message's type info (message-type slot 7).
+func (x *DeleteBackupRequest) Reset() {
+ *x = DeleteBackupRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *DeleteBackupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks *DeleteBackupRequest as a protobuf message.
+func (*DeleteBackupRequest) ProtoMessage() {}
+
+// ProtoReflect returns a reflective view of the message, lazily caching the
+// type info on first use when the unsafe fast path is available.
+func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead.
+func (*DeleteBackupRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{7}
+}
+
+// GetName returns the backup resource name; nil-safe (returns "" on nil).
+func (x *DeleteBackupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
// The request for
// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
type ListBackupsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The instance to list backups from. Values are of the
	// form `projects/<project>/instances/<instance>`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// An expression that filters the list of returned backups.
	//
	// A filter expression consists of a field name, a comparison operator, and a
	// value for filtering.
	// The value must be a string, a number, or a boolean. The comparison operator
	// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
	// Colon `:` is the contains operator. Filter rules are not case sensitive.
	//
	// The following fields in the
	// [Backup][google.spanner.admin.database.v1.Backup] are eligible for
	// filtering:
	//
	//   - `name`
	//   - `database`
	//   - `state`
	//   - `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
	//   - `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
	//   - `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
	//   - `size_bytes`
	//   - `backup_schedules`
	//
	// You can combine multiple expressions by enclosing each expression in
	// parentheses. By default, expressions are combined with AND logic, but
	// you can specify AND, OR, and NOT logic explicitly.
	//
	// Here are a few examples:
	//
	//   - `name:Howl` - The backup's name contains the string "howl".
	//   - `database:prod`
	//   - The database's name contains the string "prod".
	//   - `state:CREATING` - The backup is pending creation.
	//   - `state:READY` - The backup is fully created and ready for use.
	//   - `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
	//   - The backup name contains the string "howl" and `create_time`
	//     of the backup is before 2018-03-28T14:50:00Z.
	//   - `expire_time < \"2018-03-28T14:50:00Z\"`
	//   - The backup `expire_time` is before 2018-03-28T14:50:00Z.
	//   - `size_bytes > 10000000000` - The backup's size is greater than 10GB
	//   - `backup_schedules:daily`
	//   - The backup is created from a schedule with "daily" in its name.
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Number of backups to be returned in the response. If 0 or
	// less, defaults to the server's maximum allowed page size.
	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If non-empty, `page_token` should contain a
	// [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
	// from a previous
	// [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
	// to the same `parent` and with the same `filter`.
	PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *ListBackupsRequest) Reset() {
	*x = ListBackupsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *ListBackupsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ListBackupsRequest as a protobuf message.
func (*ListBackupsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead.
func (*ListBackupsRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{8}
}

// GetParent returns the parent instance name; safe to call on a nil receiver.
func (x *ListBackupsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetFilter returns the filter expression; safe to call on a nil receiver.
func (x *ListBackupsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}

// GetPageSize returns the requested page size; safe to call on a nil receiver.
func (x *ListBackupsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}

// GetPageToken returns the pagination token; safe to call on a nil receiver.
func (x *ListBackupsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
+
// The response for
// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
type ListBackupsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The list of matching backups. Backups returned are ordered by `create_time`
	// in descending order, starting from the most recent `create_time`.
	Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"`
	// `next_page_token` can be sent in a subsequent
	// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
	// call to fetch more of the matching backups.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *ListBackupsResponse) Reset() {
	*x = ListBackupsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *ListBackupsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ListBackupsResponse as a protobuf message.
func (*ListBackupsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead.
func (*ListBackupsResponse) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{9}
}

// GetBackups returns the matching backups; safe to call on a nil receiver.
func (x *ListBackupsResponse) GetBackups() []*Backup {
	if x != nil {
		return x.Backups
	}
	return nil
}

// GetNextPageToken returns the pagination token; safe to call on a nil receiver.
func (x *ListBackupsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
+
// The request for
// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
type ListBackupOperationsRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The instance of the backup operations. Values are of
	// the form `projects/<project>/instances/<instance>`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// An expression that filters the list of returned backup operations.
	//
	// A filter expression consists of a field name, a
	// comparison operator, and a value for filtering.
	// The value must be a string, a number, or a boolean. The comparison operator
	// must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
	// Colon `:` is the contains operator. Filter rules are not case sensitive.
	//
	// The following fields in the [operation][google.longrunning.Operation]
	// are eligible for filtering:
	//
	//   - `name` - The name of the long-running operation
	//   - `done` - False if the operation is in progress, else true.
	//   - `metadata.@type` - the type of metadata. For example, the type string
	//     for
	//     [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
	//     is
	//     `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
	//   - `metadata.<field_name>` - any field in metadata.value.
	//     `metadata.@type` must be specified first if filtering on metadata
	//     fields.
	//   - `error` - Error associated with the long-running operation.
	//   - `response.@type` - the type of response.
	//   - `response.<field_name>` - any field in response.value.
	//
	// You can combine multiple expressions by enclosing each expression in
	// parentheses. By default, expressions are combined with AND logic, but
	// you can specify AND, OR, and NOT logic explicitly.
	//
	// Here are a few examples:
	//
	//   - `done:true` - The operation is complete.
	//   - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
	//     `metadata.database:prod` - Returns operations where:
	//   - The operation's metadata type is
	//     [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
	//   - The source database name of backup contains the string "prod".
	//   - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
	//     `(metadata.name:howl) AND` \
	//     `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
	//     `(error:*)` - Returns operations where:
	//   - The operation's metadata type is
	//     [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
	//   - The backup name contains the string "howl".
	//   - The operation started before 2018-03-28T14:50:00Z.
	//   - The operation resulted in an error.
	//   - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
	//     `(metadata.source_backup:test) AND` \
	//     `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
	//     `(error:*)` - Returns operations where:
	//   - The operation's metadata type is
	//     [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
	//   - The source backup name contains the string "test".
	//   - The operation started before 2022-01-18T14:50:00Z.
	//   - The operation resulted in an error.
	//   - `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
	//     `(metadata.database:test_db)) OR` \
	//     `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
	//     AND` \
	//     `(metadata.source_backup:test_bkp)) AND` \
	//     `(error:*)` - Returns operations where:
	//   - The operation's metadata matches either of criteria:
	//   - The operation's metadata type is
	//     [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
	//     AND the source database name of the backup contains the string
	//     "test_db"
	//   - The operation's metadata type is
	//     [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
	//     AND the source backup name contains the string "test_bkp"
	//   - The operation resulted in an error.
	Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
	// Number of operations to be returned in the response. If 0 or
	// less, defaults to the server's maximum allowed page size.
	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If non-empty, `page_token` should contain a
	// [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
	// from a previous
	// [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
	// to the same `parent` and with the same `filter`.
	PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *ListBackupOperationsRequest) Reset() {
	*x = ListBackupOperationsRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[10]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *ListBackupOperationsRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ListBackupOperationsRequest as a protobuf message.
func (*ListBackupOperationsRequest) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *ListBackupOperationsRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[10]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListBackupOperationsRequest.ProtoReflect.Descriptor instead.
func (*ListBackupOperationsRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{10}
}

// GetParent returns the parent instance name; safe to call on a nil receiver.
func (x *ListBackupOperationsRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

// GetFilter returns the filter expression; safe to call on a nil receiver.
func (x *ListBackupOperationsRequest) GetFilter() string {
	if x != nil {
		return x.Filter
	}
	return ""
}

// GetPageSize returns the requested page size; safe to call on a nil receiver.
func (x *ListBackupOperationsRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}

// GetPageToken returns the pagination token; safe to call on a nil receiver.
func (x *ListBackupOperationsRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
+
// The response for
// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
type ListBackupOperationsResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The list of matching backup [long-running
	// operations][google.longrunning.Operation]. Each operation's name will be
	// prefixed by the backup's name. The operation's
	// [metadata][google.longrunning.Operation.metadata] field type
	// `metadata.type_url` describes the type of the metadata. Operations returned
	// include those that are pending or have completed/failed/canceled within the
	// last 7 days. Operations returned are ordered by
	// `operation.metadata.value.progress.start_time` in descending order starting
	// from the most recently started operation.
	Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
	// `next_page_token` can be sent in a subsequent
	// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
	// call to fetch more of the matching metadata.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *ListBackupOperationsResponse) Reset() {
	*x = ListBackupOperationsResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[11]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *ListBackupOperationsResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *ListBackupOperationsResponse as a protobuf message.
func (*ListBackupOperationsResponse) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *ListBackupOperationsResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[11]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListBackupOperationsResponse.ProtoReflect.Descriptor instead.
func (*ListBackupOperationsResponse) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{11}
}

// GetOperations returns the matching long-running operations; safe to call
// on a nil receiver.
func (x *ListBackupOperationsResponse) GetOperations() []*longrunningpb.Operation {
	if x != nil {
		return x.Operations
	}
	return nil
}

// GetNextPageToken returns the pagination token; safe to call on a nil receiver.
func (x *ListBackupOperationsResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
+
// Information about a backup.
type BackupInfo struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Name of the backup.
	Backup string `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"`
	// The backup contains an externally consistent copy of `source_database` at
	// the timestamp specified by `version_time`. If the
	// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
	// request did not specify `version_time`, the `version_time` of the backup is
	// equivalent to the `create_time`.
	VersionTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=version_time,json=versionTime,proto3" json:"version_time,omitempty"`
	// The time the
	// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
	// request was received.
	CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Name of the database the backup was created from.
	SourceDatabase string `protobuf:"bytes,3,opt,name=source_database,json=sourceDatabase,proto3" json:"source_database,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *BackupInfo) Reset() {
	*x = BackupInfo{}
	if protoimpl.UnsafeeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[12]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *BackupInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *BackupInfo as a protobuf message.
func (*BackupInfo) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *BackupInfo) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[12]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BackupInfo.ProtoReflect.Descriptor instead.
func (*BackupInfo) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{12}
}

// GetBackup returns the backup name; safe to call on a nil receiver.
func (x *BackupInfo) GetBackup() string {
	if x != nil {
		return x.Backup
	}
	return ""
}

// GetVersionTime returns the backup's version time; safe to call on a nil receiver.
func (x *BackupInfo) GetVersionTime() *timestamppb.Timestamp {
	if x != nil {
		return x.VersionTime
	}
	return nil
}

// GetCreateTime returns the backup's create time; safe to call on a nil receiver.
func (x *BackupInfo) GetCreateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CreateTime
	}
	return nil
}

// GetSourceDatabase returns the source database name; safe to call on a nil receiver.
func (x *BackupInfo) GetSourceDatabase() string {
	if x != nil {
		return x.SourceDatabase
	}
	return ""
}
+
// Encryption configuration for the backup to create.
type CreateBackupEncryptionConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The encryption type of the backup.
	EncryptionType CreateBackupEncryptionConfig_EncryptionType `protobuf:"varint,1,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.CreateBackupEncryptionConfig_EncryptionType" json:"encryption_type,omitempty"`
	// Optional. The Cloud KMS key that will be used to protect the backup.
	// This field should be set only when
	// [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
	// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
	// `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
	KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
	// Optional. Specifies the KMS configuration for the one or more keys used to
	// protect the backup. Values are of the form
	// `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
	//
	// The keys referenced by kms_key_names must fully cover all
	// regions of the backup's instance configuration. Some examples:
	// * For single region instance configs, specify a single regional
	// location KMS key.
	// * For multi-regional instance configs of type GOOGLE_MANAGED,
	// either specify a multi-regional location KMS key or multiple regional
	// location KMS keys that cover all regions in the instance config.
	// * For an instance config of type USER_MANAGED, please specify only
	// regional location KMS keys to cover each region in the instance config.
	// Multi-regional location KMS keys are not supported for USER_MANAGED
	// instance configs.
	KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *CreateBackupEncryptionConfig) Reset() {
	*x = CreateBackupEncryptionConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[13]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *CreateBackupEncryptionConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CreateBackupEncryptionConfig as a protobuf message.
func (*CreateBackupEncryptionConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *CreateBackupEncryptionConfig) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[13]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateBackupEncryptionConfig.ProtoReflect.Descriptor instead.
func (*CreateBackupEncryptionConfig) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{13}
}

// GetEncryptionType returns the encryption type, defaulting to the
// unspecified enum value on a nil receiver.
func (x *CreateBackupEncryptionConfig) GetEncryptionType() CreateBackupEncryptionConfig_EncryptionType {
	if x != nil {
		return x.EncryptionType
	}
	return CreateBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED
}

// GetKmsKeyName returns the KMS key name; safe to call on a nil receiver.
func (x *CreateBackupEncryptionConfig) GetKmsKeyName() string {
	if x != nil {
		return x.KmsKeyName
	}
	return ""
}

// GetKmsKeyNames returns the KMS key names; safe to call on a nil receiver.
func (x *CreateBackupEncryptionConfig) GetKmsKeyNames() []string {
	if x != nil {
		return x.KmsKeyNames
	}
	return nil
}
+
// Encryption configuration for the copied backup.
type CopyBackupEncryptionConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The encryption type of the backup.
	EncryptionType CopyBackupEncryptionConfig_EncryptionType `protobuf:"varint,1,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.CopyBackupEncryptionConfig_EncryptionType" json:"encryption_type,omitempty"`
	// Optional. The Cloud KMS key that will be used to protect the backup.
	// This field should be set only when
	// [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
	// is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
	// `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
	KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
	// Optional. Specifies the KMS configuration for the one or more keys used to
	// protect the backup. Values are of the form
	// `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
	// Kms keys specified can be in any order.
	//
	// The keys referenced by kms_key_names must fully cover all
	// regions of the backup's instance configuration. Some examples:
	// * For single region instance configs, specify a single regional
	// location KMS key.
	// * For multi-regional instance configs of type GOOGLE_MANAGED,
	// either specify a multi-regional location KMS key or multiple regional
	// location KMS keys that cover all regions in the instance config.
	// * For an instance config of type USER_MANAGED, please specify only
	// regional location KMS keys to cover each region in the instance config.
	// Multi-regional location KMS keys are not supported for USER_MANAGED
	// instance configs.
	KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *CopyBackupEncryptionConfig) Reset() {
	*x = CopyBackupEncryptionConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[14]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *CopyBackupEncryptionConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *CopyBackupEncryptionConfig as a protobuf message.
func (*CopyBackupEncryptionConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *CopyBackupEncryptionConfig) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[14]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CopyBackupEncryptionConfig.ProtoReflect.Descriptor instead.
func (*CopyBackupEncryptionConfig) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{14}
}

// GetEncryptionType returns the encryption type, defaulting to the
// unspecified enum value on a nil receiver.
func (x *CopyBackupEncryptionConfig) GetEncryptionType() CopyBackupEncryptionConfig_EncryptionType {
	if x != nil {
		return x.EncryptionType
	}
	return CopyBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED
}

// GetKmsKeyName returns the KMS key name; safe to call on a nil receiver.
func (x *CopyBackupEncryptionConfig) GetKmsKeyName() string {
	if x != nil {
		return x.KmsKeyName
	}
	return ""
}

// GetKmsKeyNames returns the KMS key names; safe to call on a nil receiver.
func (x *CopyBackupEncryptionConfig) GetKmsKeyNames() []string {
	if x != nil {
		return x.KmsKeyNames
	}
	return nil
}
+
// The specification for full backups.
// A full backup stores the entire contents of the database at a given
// version time.
//
// This message currently carries no fields; it acts as a marker selecting
// the full-backup mode.
type FullBackupSpec struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *FullBackupSpec) Reset() {
	*x = FullBackupSpec{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[15]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *FullBackupSpec) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *FullBackupSpec as a protobuf message.
func (*FullBackupSpec) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *FullBackupSpec) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[15]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use FullBackupSpec.ProtoReflect.Descriptor instead.
func (*FullBackupSpec) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{15}
}
+
// The specification for incremental backup chains.
// An incremental backup stores the delta of changes between a previous
// backup and the database contents at a given version time. An
// incremental backup chain consists of a full backup and zero or more
// successive incremental backups. The first backup created for an
// incremental backup chain is always a full backup.
//
// This message currently carries no fields; it acts as a marker selecting
// the incremental-backup mode.
type IncrementalBackupSpec struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
}

// Reset returns x to its zero value, re-storing the cached message info
// under the unsafe fast path so reflection keeps working.
func (x *IncrementalBackupSpec) Reset() {
	*x = IncrementalBackupSpec{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[16]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message via the protoimpl runtime's text formatter.
func (x *IncrementalBackupSpec) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IncrementalBackupSpec as a protobuf message.
func (*IncrementalBackupSpec) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily storing the
// message info on first use when the unsafe fast path is enabled.
func (x *IncrementalBackupSpec) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[16]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IncrementalBackupSpec.ProtoReflect.Descriptor instead.
func (*IncrementalBackupSpec) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{16}
}
+
+var File_google_spanner_admin_database_v1_backup_proto protoreflect.FileDescriptor
+
+var file_google_spanner_admin_database_v1_backup_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
+ 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x0b, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12,
+ 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x69,
+ 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x66, 0x72, 0x65, 0x65, 0x61,
+ 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x66, 0x72, 0x65, 0x65, 0x61,
+ 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x14,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x12, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79,
+ 0x74, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5c,
+ 0x0a, 0x15, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x42, 0x27, 0xe0,
+ 0x41, 0x03, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x14, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0x5e, 0x0a, 0x0f,
+ 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x6c, 0x0a, 0x16,
+ 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x15, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x56, 0x0a,
+ 0x13, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x03, 0xfa,
+ 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x52, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x0d, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x58,
+ 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
+ 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x27,
+ 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x63,
+ 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x4f, 0x0a, 0x13,
+ 0x6f, 0x6c, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x6f, 0x6c, 0x64, 0x65,
+ 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x37, 0x0a,
+ 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f,
+ 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a,
+ 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52,
+ 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x5c, 0xea, 0x41, 0x59, 0x0a, 0x1d, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x38, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xb1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a,
+ 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12,
+ 0x45, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
+ 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x70, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9e, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x36, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21,
+ 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x08, 0x70,
+ 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65,
+ 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b,
+ 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63,
+ 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf4, 0x02, 0x0a, 0x11, 0x43, 0x6f,
+ 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x20, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40,
+ 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x6e, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
+ 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10,
+ 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x22, 0xa3, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x36, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x47, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52,
+ 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63,
+ 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45,
+ 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x4d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73,
+ 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67,
+ 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07,
+ 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73,
+ 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21,
+ 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x85, 0x01,
+ 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d,
+ 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67,
+ 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a,
+ 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x12, 0x3d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12,
+ 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x0e, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0xc8, 0x03, 0x0a, 0x1c,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x0f,
+ 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x6b, 0x6d, 0x73,
+ 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b,
+ 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x29, 0xe0,
+ 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52,
+ 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50,
+ 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45,
+ 0x5f, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
+ 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45,
+ 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54,
+ 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45,
+ 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
+ 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0xd4, 0x03, 0x0a, 0x1a, 0x43, 0x6f, 0x70, 0x79, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x4b, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
+ 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a,
+ 0x0d, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52,
+ 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a,
+ 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x2b, 0x0a, 0x27, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, 0x44,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50,
+ 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1d, 0x0a,
+ 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f,
+ 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b,
+ 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44,
+ 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0x10, 0x0a,
+ 0x0e, 0x46, 0x75, 0x6c, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x22,
+ 0x17, 0x0a, 0x15, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x42, 0xfd, 0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x42, 0x0b, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x46, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56,
+ 0x31, 0xca, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_spanner_admin_database_v1_backup_proto_rawDescOnce sync.Once
+ file_google_spanner_admin_database_v1_backup_proto_rawDescData = file_google_spanner_admin_database_v1_backup_proto_rawDesc
+)
+
+func file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP() []byte {
+ file_google_spanner_admin_database_v1_backup_proto_rawDescOnce.Do(func() {
+ file_google_spanner_admin_database_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_backup_proto_rawDescData)
+ })
+ return file_google_spanner_admin_database_v1_backup_proto_rawDescData
+}
+
+var file_google_spanner_admin_database_v1_backup_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_google_spanner_admin_database_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
+
+// file_google_spanner_admin_database_v1_backup_proto_goTypes maps the
+// descriptor's type indices (the "// N:" annotations) to concrete Go types:
+// enums first (0-2), then messages (3-19), then imported types (20-25).
+// The indices here are what depIdxs below refers to.
+var file_google_spanner_admin_database_v1_backup_proto_goTypes = []any{
+	(Backup_State)(0),                              // 0: google.spanner.admin.database.v1.Backup.State
+	(CreateBackupEncryptionConfig_EncryptionType)(0), // 1: google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType
+	(CopyBackupEncryptionConfig_EncryptionType)(0),   // 2: google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType
+	(*Backup)(nil),                       // 3: google.spanner.admin.database.v1.Backup
+	(*CreateBackupRequest)(nil),          // 4: google.spanner.admin.database.v1.CreateBackupRequest
+	(*CreateBackupMetadata)(nil),         // 5: google.spanner.admin.database.v1.CreateBackupMetadata
+	(*CopyBackupRequest)(nil),            // 6: google.spanner.admin.database.v1.CopyBackupRequest
+	(*CopyBackupMetadata)(nil),           // 7: google.spanner.admin.database.v1.CopyBackupMetadata
+	(*UpdateBackupRequest)(nil),          // 8: google.spanner.admin.database.v1.UpdateBackupRequest
+	(*GetBackupRequest)(nil),             // 9: google.spanner.admin.database.v1.GetBackupRequest
+	(*DeleteBackupRequest)(nil),          // 10: google.spanner.admin.database.v1.DeleteBackupRequest
+	(*ListBackupsRequest)(nil),           // 11: google.spanner.admin.database.v1.ListBackupsRequest
+	(*ListBackupsResponse)(nil),          // 12: google.spanner.admin.database.v1.ListBackupsResponse
+	(*ListBackupOperationsRequest)(nil),  // 13: google.spanner.admin.database.v1.ListBackupOperationsRequest
+	(*ListBackupOperationsResponse)(nil), // 14: google.spanner.admin.database.v1.ListBackupOperationsResponse
+	(*BackupInfo)(nil),                   // 15: google.spanner.admin.database.v1.BackupInfo
+	(*CreateBackupEncryptionConfig)(nil), // 16: google.spanner.admin.database.v1.CreateBackupEncryptionConfig
+	(*CopyBackupEncryptionConfig)(nil),   // 17: google.spanner.admin.database.v1.CopyBackupEncryptionConfig
+	(*FullBackupSpec)(nil),               // 18: google.spanner.admin.database.v1.FullBackupSpec
+	(*IncrementalBackupSpec)(nil),        // 19: google.spanner.admin.database.v1.IncrementalBackupSpec
+	(*timestamppb.Timestamp)(nil),        // 20: google.protobuf.Timestamp
+	(*EncryptionInfo)(nil),               // 21: google.spanner.admin.database.v1.EncryptionInfo
+	(DatabaseDialect)(0),                 // 22: google.spanner.admin.database.v1.DatabaseDialect
+	(*OperationProgress)(nil),            // 23: google.spanner.admin.database.v1.OperationProgress
+	(*fieldmaskpb.FieldMask)(nil),        // 24: google.protobuf.FieldMask
+	(*longrunningpb.Operation)(nil),      // 25: google.longrunning.Operation
+}
+
+// file_google_spanner_admin_database_v1_backup_proto_depIdxs resolves each
+// field's type reference to an index in goTypes above. The trailing
+// "[a:b]" entries partition the list into sub-lists for method output/input
+// types, extensions, and field type names (all method/extension sub-lists
+// are empty here, so field type_name covers the whole [0:25] range).
+var file_google_spanner_admin_database_v1_backup_proto_depIdxs = []int32{
+	20, // 0: google.spanner.admin.database.v1.Backup.version_time:type_name -> google.protobuf.Timestamp
+	20, // 1: google.spanner.admin.database.v1.Backup.expire_time:type_name -> google.protobuf.Timestamp
+	20, // 2: google.spanner.admin.database.v1.Backup.create_time:type_name -> google.protobuf.Timestamp
+	0,  // 3: google.spanner.admin.database.v1.Backup.state:type_name -> google.spanner.admin.database.v1.Backup.State
+	21, // 4: google.spanner.admin.database.v1.Backup.encryption_info:type_name -> google.spanner.admin.database.v1.EncryptionInfo
+	21, // 5: google.spanner.admin.database.v1.Backup.encryption_information:type_name -> google.spanner.admin.database.v1.EncryptionInfo
+	22, // 6: google.spanner.admin.database.v1.Backup.database_dialect:type_name -> google.spanner.admin.database.v1.DatabaseDialect
+	20, // 7: google.spanner.admin.database.v1.Backup.max_expire_time:type_name -> google.protobuf.Timestamp
+	20, // 8: google.spanner.admin.database.v1.Backup.oldest_version_time:type_name -> google.protobuf.Timestamp
+	3,  // 9: google.spanner.admin.database.v1.CreateBackupRequest.backup:type_name -> google.spanner.admin.database.v1.Backup
+	16, // 10: google.spanner.admin.database.v1.CreateBackupRequest.encryption_config:type_name -> google.spanner.admin.database.v1.CreateBackupEncryptionConfig
+	23, // 11: google.spanner.admin.database.v1.CreateBackupMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
+	20, // 12: google.spanner.admin.database.v1.CreateBackupMetadata.cancel_time:type_name -> google.protobuf.Timestamp
+	20, // 13: google.spanner.admin.database.v1.CopyBackupRequest.expire_time:type_name -> google.protobuf.Timestamp
+	17, // 14: google.spanner.admin.database.v1.CopyBackupRequest.encryption_config:type_name -> google.spanner.admin.database.v1.CopyBackupEncryptionConfig
+	23, // 15: google.spanner.admin.database.v1.CopyBackupMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
+	20, // 16: google.spanner.admin.database.v1.CopyBackupMetadata.cancel_time:type_name -> google.protobuf.Timestamp
+	3,  // 17: google.spanner.admin.database.v1.UpdateBackupRequest.backup:type_name -> google.spanner.admin.database.v1.Backup
+	24, // 18: google.spanner.admin.database.v1.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask
+	3,  // 19: google.spanner.admin.database.v1.ListBackupsResponse.backups:type_name -> google.spanner.admin.database.v1.Backup
+	25, // 20: google.spanner.admin.database.v1.ListBackupOperationsResponse.operations:type_name -> google.longrunning.Operation
+	20, // 21: google.spanner.admin.database.v1.BackupInfo.version_time:type_name -> google.protobuf.Timestamp
+	20, // 22: google.spanner.admin.database.v1.BackupInfo.create_time:type_name -> google.protobuf.Timestamp
+	1,  // 23: google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type:type_name -> google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType
+	2,  // 24: google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type:type_name -> google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType
+	25, // [25:25] is the sub-list for method output_type
+	25, // [25:25] is the sub-list for method input_type
+	25, // [25:25] is the sub-list for extension type_name
+	25, // [25:25] is the sub-list for extension extendee
+	0,  // [0:25] is the sub-list for field type_name
+}
+
+// file_google_spanner_admin_database_v1_backup_proto_init builds the type
+// information for backup.proto and registers it with the protobuf runtime.
+// It is idempotent: once File_google_spanner_admin_database_v1_backup_proto
+// has been set by a successful build, subsequent calls return immediately.
+func init() { file_google_spanner_admin_database_v1_backup_proto_init() }
+func file_google_spanner_admin_database_v1_backup_proto_init() {
+	if File_google_spanner_admin_database_v1_backup_proto != nil {
+		return
+	}
+	// backup.proto references types declared in common.proto; initialize
+	// that file's descriptors first so cross-file references resolve.
+	file_google_spanner_admin_database_v1_common_proto_init()
+	// Without the unsafe-pointer fast path, install exporter closures that
+	// give the reflection-based implementation access to each message's
+	// unexported state, sizeCache and unknownFields fields.
+	if !protoimpl.UnsafeEnabled {
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[0].Exporter = func(v any, i int) any {
+			switch v := v.(*Backup); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[1].Exporter = func(v any, i int) any {
+			switch v := v.(*CreateBackupRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[2].Exporter = func(v any, i int) any {
+			switch v := v.(*CreateBackupMetadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[3].Exporter = func(v any, i int) any {
+			switch v := v.(*CopyBackupRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[4].Exporter = func(v any, i int) any {
+			switch v := v.(*CopyBackupMetadata); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[5].Exporter = func(v any, i int) any {
+			switch v := v.(*UpdateBackupRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[6].Exporter = func(v any, i int) any {
+			switch v := v.(*GetBackupRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[7].Exporter = func(v any, i int) any {
+			switch v := v.(*DeleteBackupRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[8].Exporter = func(v any, i int) any {
+			switch v := v.(*ListBackupsRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[9].Exporter = func(v any, i int) any {
+			switch v := v.(*ListBackupsResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[10].Exporter = func(v any, i int) any {
+			switch v := v.(*ListBackupOperationsRequest); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[11].Exporter = func(v any, i int) any {
+			switch v := v.(*ListBackupOperationsResponse); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[12].Exporter = func(v any, i int) any {
+			switch v := v.(*BackupInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[13].Exporter = func(v any, i int) any {
+			switch v := v.(*CreateBackupEncryptionConfig); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[14].Exporter = func(v any, i int) any {
+			switch v := v.(*CopyBackupEncryptionConfig); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[15].Exporter = func(v any, i int) any {
+			switch v := v.(*FullBackupSpec); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_spanner_admin_database_v1_backup_proto_msgTypes[16].Exporter = func(v any, i int) any {
+			switch v := v.(*IncrementalBackupSpec); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	// Build and register the file: the counts here must match the tables
+	// declared above (3 enums, 17 messages, no extensions or services).
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_spanner_admin_database_v1_backup_proto_rawDesc,
+			NumEnums:      3,
+			NumMessages:   17,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_spanner_admin_database_v1_backup_proto_goTypes,
+		DependencyIndexes: file_google_spanner_admin_database_v1_backup_proto_depIdxs,
+		EnumInfos:         file_google_spanner_admin_database_v1_backup_proto_enumTypes,
+		MessageInfos:      file_google_spanner_admin_database_v1_backup_proto_msgTypes,
+	}.Build()
+	File_google_spanner_admin_database_v1_backup_proto = out.File
+	// Drop build-time inputs so the garbage collector can reclaim them;
+	// the runtime now owns the registered descriptors.
+	file_google_spanner_admin_database_v1_backup_proto_rawDesc = nil
+	file_google_spanner_admin_database_v1_backup_proto_goTypes = nil
+	file_google_spanner_admin_database_v1_backup_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go
new file mode 100644
index 000000000..fd7ca6b0d
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go
@@ -0,0 +1,1080 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
+// source: google/spanner/admin/database/v1/backup_schedule.proto
+
+package databasepb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Compile-time version guards: each expression fails to compile if the
+// generated code and the linked protoimpl runtime are incompatible.
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Defines specifications of the backup schedule.
+type BackupScheduleSpec struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required.
+	//
+	// Types that are assignable to ScheduleSpec:
+	//
+	//	*BackupScheduleSpec_CronSpec
+	ScheduleSpec isBackupScheduleSpec_ScheduleSpec `protobuf_oneof:"schedule_spec"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *BackupScheduleSpec) Reset() {
+	*x = BackupScheduleSpec{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BackupScheduleSpec) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BackupScheduleSpec) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *BackupScheduleSpec) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BackupScheduleSpec.ProtoReflect.Descriptor instead.
+func (*BackupScheduleSpec) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{0}
+}
+
+// GetScheduleSpec returns the populated oneof wrapper, or nil if unset.
+func (m *BackupScheduleSpec) GetScheduleSpec() isBackupScheduleSpec_ScheduleSpec {
+	if m != nil {
+		return m.ScheduleSpec
+	}
+	return nil
+}
+
+// GetCronSpec returns the cron spec if that oneof case is set, else nil.
+func (x *BackupScheduleSpec) GetCronSpec() *CrontabSpec {
+	if x, ok := x.GetScheduleSpec().(*BackupScheduleSpec_CronSpec); ok {
+		return x.CronSpec
+	}
+	return nil
+}
+
+// isBackupScheduleSpec_ScheduleSpec is the interface satisfied by all
+// wrapper types of the schedule_spec oneof.
+type isBackupScheduleSpec_ScheduleSpec interface {
+	isBackupScheduleSpec_ScheduleSpec()
+}
+
+type BackupScheduleSpec_CronSpec struct {
+	// Cron style schedule specification.
+	CronSpec *CrontabSpec `protobuf:"bytes,1,opt,name=cron_spec,json=cronSpec,proto3,oneof"`
+}
+
+func (*BackupScheduleSpec_CronSpec) isBackupScheduleSpec_ScheduleSpec() {}
+
+// BackupSchedule expresses the automated backup creation specification for a
+// Spanner database.
+// Next ID: 10
+type BackupSchedule struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Identifier. Output only for the
+	// [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchedule] operation.
+	// Required for the
+	// [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
+	// operation. A globally unique identifier for the backup schedule which
+	// cannot be changed. Values are of the form
+	// `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
+	// The final segment of the name must be between 2 and 60 characters in
+	// length.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Optional. The schedule specification based on which the backup creations
+	// are triggered.
+	Spec *BackupScheduleSpec `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"`
+	// Optional. The retention duration of a backup that must be at least 6 hours
+	// and at most 366 days. The backup is eligible to be automatically deleted
+	// once the retention period has elapsed.
+	RetentionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"`
+	// Optional. The encryption configuration that will be used to encrypt the
+	// backup. If this field is not specified, the backup will use the same
+	// encryption configuration as the database.
+	EncryptionConfig *CreateBackupEncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
+	// Required. Backup type spec determines the type of backup that is created by
+	// the backup schedule. Currently, only full backups are supported.
+	//
+	// Types that are assignable to BackupTypeSpec:
+	//
+	//	*BackupSchedule_FullBackupSpec
+	//	*BackupSchedule_IncrementalBackupSpec
+	BackupTypeSpec isBackupSchedule_BackupTypeSpec `protobuf_oneof:"backup_type_spec"`
+	// Output only. The timestamp at which the schedule was last updated.
+	// If the schedule has never been updated, this field contains the timestamp
+	// when the schedule was first created.
+	UpdateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *BackupSchedule) Reset() {
+	*x = BackupSchedule{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BackupSchedule) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BackupSchedule) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *BackupSchedule) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BackupSchedule.ProtoReflect.Descriptor instead.
+func (*BackupSchedule) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BackupSchedule) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *BackupSchedule) GetSpec() *BackupScheduleSpec {
+	if x != nil {
+		return x.Spec
+	}
+	return nil
+}
+
+func (x *BackupSchedule) GetRetentionDuration() *durationpb.Duration {
+	if x != nil {
+		return x.RetentionDuration
+	}
+	return nil
+}
+
+func (x *BackupSchedule) GetEncryptionConfig() *CreateBackupEncryptionConfig {
+	if x != nil {
+		return x.EncryptionConfig
+	}
+	return nil
+}
+
+// GetBackupTypeSpec returns the populated oneof wrapper, or nil if unset.
+func (m *BackupSchedule) GetBackupTypeSpec() isBackupSchedule_BackupTypeSpec {
+	if m != nil {
+		return m.BackupTypeSpec
+	}
+	return nil
+}
+
+// GetFullBackupSpec returns the full-backup spec if that oneof case is set.
+func (x *BackupSchedule) GetFullBackupSpec() *FullBackupSpec {
+	if x, ok := x.GetBackupTypeSpec().(*BackupSchedule_FullBackupSpec); ok {
+		return x.FullBackupSpec
+	}
+	return nil
+}
+
+// GetIncrementalBackupSpec returns the incremental-backup spec if that oneof
+// case is set.
+func (x *BackupSchedule) GetIncrementalBackupSpec() *IncrementalBackupSpec {
+	if x, ok := x.GetBackupTypeSpec().(*BackupSchedule_IncrementalBackupSpec); ok {
+		return x.IncrementalBackupSpec
+	}
+	return nil
+}
+
+func (x *BackupSchedule) GetUpdateTime() *timestamppb.Timestamp {
+	if x != nil {
+		return x.UpdateTime
+	}
+	return nil
+}
+
+// isBackupSchedule_BackupTypeSpec is the interface satisfied by all wrapper
+// types of the backup_type_spec oneof.
+type isBackupSchedule_BackupTypeSpec interface {
+	isBackupSchedule_BackupTypeSpec()
+}
+
+type BackupSchedule_FullBackupSpec struct {
+	// The schedule creates only full backups.
+	FullBackupSpec *FullBackupSpec `protobuf:"bytes,7,opt,name=full_backup_spec,json=fullBackupSpec,proto3,oneof"`
+}
+
+type BackupSchedule_IncrementalBackupSpec struct {
+	// The schedule creates incremental backup chains.
+	IncrementalBackupSpec *IncrementalBackupSpec `protobuf:"bytes,8,opt,name=incremental_backup_spec,json=incrementalBackupSpec,proto3,oneof"`
+}
+
+func (*BackupSchedule_FullBackupSpec) isBackupSchedule_BackupTypeSpec() {}
+
+func (*BackupSchedule_IncrementalBackupSpec) isBackupSchedule_BackupTypeSpec() {}
+
+// CrontabSpec can be used to specify the version time and frequency at
+// which the backup should be created.
+type CrontabSpec struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Textual representation of the crontab. User can customize the
+	// backup frequency and the backup version time using the cron
+	// expression. The version time must be in UTC timezone.
+	//
+	// The backup will contain an externally consistent copy of the
+	// database at the version time. Allowed frequencies are 12 hour, 1 day,
+	// 1 week and 1 month. Examples of valid cron specifications:
+	//   - `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
+	//   - `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
+	//   - `0 2 * * * `    : once a day at 2 past midnight in UTC.
+	//   - `0 2 * * 0 `    : once a week every Sunday at 2 past midnight in UTC.
+	//   - `0 2 8 * * `    : once a month on 8th day at 2 past midnight in UTC.
+	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
+	// Output only. The time zone of the times in `CrontabSpec.text`. Currently
+	// only UTC is supported.
+	TimeZone string `protobuf:"bytes,2,opt,name=time_zone,json=timeZone,proto3" json:"time_zone,omitempty"`
+	// Output only. Schedule backups will contain an externally consistent copy
+	// of the database at the version time specified in
+	// `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
+	// of the scheduled backups at that version time. Spanner will initiate
+	// the creation of scheduled backups within the time window bounded by the
+	// version_time specified in `schedule_spec.cron_spec` and version_time +
+	// `creation_window`.
+	CreationWindow *durationpb.Duration `protobuf:"bytes,3,opt,name=creation_window,json=creationWindow,proto3" json:"creation_window,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *CrontabSpec) Reset() {
+	*x = CrontabSpec{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CrontabSpec) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CrontabSpec) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *CrontabSpec) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CrontabSpec.ProtoReflect.Descriptor instead.
+func (*CrontabSpec) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CrontabSpec) GetText() string {
+	if x != nil {
+		return x.Text
+	}
+	return ""
+}
+
+func (x *CrontabSpec) GetTimeZone() string {
+	if x != nil {
+		return x.TimeZone
+	}
+	return ""
+}
+
+func (x *CrontabSpec) GetCreationWindow() *durationpb.Duration {
+	if x != nil {
+		return x.CreationWindow
+	}
+	return nil
+}
+
+// The request for
+// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
+type CreateBackupScheduleRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The name of the database that this backup schedule applies to.
+	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+	// Required. The Id to use for the backup schedule. The `backup_schedule_id`
+	// appended to `parent` forms the full backup schedule name of the form
+	// `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
+	BackupScheduleId string `protobuf:"bytes,2,opt,name=backup_schedule_id,json=backupScheduleId,proto3" json:"backup_schedule_id,omitempty"`
+	// Required. The backup schedule to create.
+	BackupSchedule *BackupSchedule `protobuf:"bytes,3,opt,name=backup_schedule,json=backupSchedule,proto3" json:"backup_schedule,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *CreateBackupScheduleRequest) Reset() {
+	*x = CreateBackupScheduleRequest{}
+	if protoimpl.UnsafeeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *CreateBackupScheduleRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateBackupScheduleRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *CreateBackupScheduleRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateBackupScheduleRequest.ProtoReflect.Descriptor instead.
+func (*CreateBackupScheduleRequest) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateBackupScheduleRequest) GetParent() string {
+	if x != nil {
+		return x.Parent
+	}
+	return ""
+}
+
+func (x *CreateBackupScheduleRequest) GetBackupScheduleId() string {
+	if x != nil {
+		return x.BackupScheduleId
+	}
+	return ""
+}
+
+func (x *CreateBackupScheduleRequest) GetBackupSchedule() *BackupSchedule {
+	if x != nil {
+		return x.BackupSchedule
+	}
+	return nil
+}
+
+// The request for
+// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
+type GetBackupScheduleRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The name of the schedule to retrieve.
+	// Values are of the form
+	// `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *GetBackupScheduleRequest) Reset() {
+	*x = GetBackupScheduleRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GetBackupScheduleRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBackupScheduleRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *GetBackupScheduleRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBackupScheduleRequest.ProtoReflect.Descriptor instead.
+func (*GetBackupScheduleRequest) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *GetBackupScheduleRequest) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// The request for
+// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
+type DeleteBackupScheduleRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The name of the schedule to delete.
+	// Values are of the form
+	// `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *DeleteBackupScheduleRequest) Reset() {
+	*x = DeleteBackupScheduleRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *DeleteBackupScheduleRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteBackupScheduleRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *DeleteBackupScheduleRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteBackupScheduleRequest.ProtoReflect.Descriptor instead.
+func (*DeleteBackupScheduleRequest) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteBackupScheduleRequest) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// The request for
+// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+type ListBackupSchedulesRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Database is the parent resource whose backup schedules should be
+	// listed. Values are of the form
+	// projects/<project>/instances/<instance>/databases/<database>
+	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+	// Optional. Number of backup schedules to be returned in the response. If 0
+	// or less, defaults to the server's maximum allowed page size.
+	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+	// Optional. If non-empty, `page_token` should contain a
+	// [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
+	// from a previous
+	// [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
+	// to the same `parent`.
+	PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *ListBackupSchedulesRequest) Reset() {
+	*x = ListBackupSchedulesRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListBackupSchedulesRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListBackupSchedulesRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *ListBackupSchedulesRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListBackupSchedulesRequest.ProtoReflect.Descriptor instead.
+func (*ListBackupSchedulesRequest) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListBackupSchedulesRequest) GetParent() string {
+	if x != nil {
+		return x.Parent
+	}
+	return ""
+}
+
+func (x *ListBackupSchedulesRequest) GetPageSize() int32 {
+	if x != nil {
+		return x.PageSize
+	}
+	return 0
+}
+
+func (x *ListBackupSchedulesRequest) GetPageToken() string {
+	if x != nil {
+		return x.PageToken
+	}
+	return ""
+}
+
+// The response for
+// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
+type ListBackupSchedulesResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The list of backup schedules for a database.
+	BackupSchedules []*BackupSchedule `protobuf:"bytes,1,rep,name=backup_schedules,json=backupSchedules,proto3" json:"backup_schedules,omitempty"`
+	// `next_page_token` can be sent in a subsequent
+	// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
+	// call to fetch more of the schedules.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+// Reset restores x to its zero value. On the unsafe fast path it also
+// stores the message info in the fresh state so reflection keeps working.
+func (x *ListBackupSchedulesResponse) Reset() {
+	*x = ListBackupSchedulesResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ListBackupSchedulesResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListBackupSchedulesResponse) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of x, lazily populating the
+// message info on the unsafe fast path.
+func (x *ListBackupSchedulesResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListBackupSchedulesResponse.ProtoReflect.Descriptor instead.
+func (*ListBackupSchedulesResponse) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ListBackupSchedulesResponse) GetBackupSchedules() []*BackupSchedule {
+	if x != nil {
+		return x.BackupSchedules
+	}
+	return nil
+}
+
+func (x *ListBackupSchedulesResponse) GetNextPageToken() string {
+	if x != nil {
+		return x.NextPageToken
+	}
+	return ""
+}
+
+// The request for
+// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
+type UpdateBackupScheduleRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The backup schedule to update. `backup_schedule.name`, and the
+ // fields to be updated as specified by `update_mask` are required. Other
+ // fields are ignored.
+ BackupSchedule *BackupSchedule `protobuf:"bytes,1,opt,name=backup_schedule,json=backupSchedule,proto3" json:"backup_schedule,omitempty"`
+ // Required. A mask specifying which fields in the BackupSchedule resource
+ // should be updated. This mask is relative to the BackupSchedule resource,
+ // not to the request message. The field mask must always be
+ // specified; this prevents any future fields from being erased
+ // accidentally.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateBackupScheduleRequest) Reset() {
+ *x = UpdateBackupScheduleRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateBackupScheduleRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateBackupScheduleRequest) ProtoMessage() {}
+
+func (x *UpdateBackupScheduleRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateBackupScheduleRequest.ProtoReflect.Descriptor instead.
+func (*UpdateBackupScheduleRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *UpdateBackupScheduleRequest) GetBackupSchedule() *BackupSchedule {
+ if x != nil {
+ return x.BackupSchedule
+ }
+ return nil
+}
+
+func (x *UpdateBackupScheduleRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+var File_google_spanner_admin_database_v1_backup_schedule_proto protoreflect.FileDescriptor
+
+var file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc = []byte{
+ 0x0a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68,
+ 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61,
+ 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4c,
+ 0x0a, 0x09, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x53, 0x70, 0x65, 0x63,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x72, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x88, 0x06,
+ 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
+ 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x73, 0x70, 0x65,
+ 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x10, 0x66, 0x75, 0x6c,
+ 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x75, 0x6c, 0x6c, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x71, 0x0a, 0x17, 0x69, 0x6e, 0x63, 0x72, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x70,
+ 0x65, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65,
+ 0x63, 0x48, 0x00, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x3a, 0xa5, 0x01, 0xea,
+ 0x41, 0xa1, 0x01, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x57, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x7b, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75,
+ 0x6c, 0x65, 0x7d, 0x2a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64,
+ 0x75, 0x6c, 0x65, 0x73, 0x32, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65,
+ 0x64, 0x75, 0x6c, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x91, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x6f,
+ 0x6e, 0x74, 0x61, 0x62, 0x53, 0x70, 0x65, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x74, 0x65, 0x78,
+ 0x74, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x5a,
+ 0x6f, 0x6e, 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0xf1, 0x01, 0x0a,
+ 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a,
+ 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10,
+ 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x49, 0x64,
+ 0x12, 0x5e, 0x0a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64,
+ 0x75, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
+ 0x22, 0x5d, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa,
+ 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
+ 0x60, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74,
+ 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64,
+ 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64,
+ 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xbf, 0x01, 0x0a,
+ 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5e, 0x0a, 0x0f,
+ 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x62, 0x61,
+ 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x85,
+ 0x02, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31,
+ 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56, 0x31, 0xca,
+ 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescOnce sync.Once
+ file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData = file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc
+)
+
+func file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP() []byte {
+ file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescOnce.Do(func() {
+ file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData)
+ })
+ return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData
+}
+
+var file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_google_spanner_admin_database_v1_backup_schedule_proto_goTypes = []any{
+ (*BackupScheduleSpec)(nil), // 0: google.spanner.admin.database.v1.BackupScheduleSpec
+ (*BackupSchedule)(nil), // 1: google.spanner.admin.database.v1.BackupSchedule
+ (*CrontabSpec)(nil), // 2: google.spanner.admin.database.v1.CrontabSpec
+ (*CreateBackupScheduleRequest)(nil), // 3: google.spanner.admin.database.v1.CreateBackupScheduleRequest
+ (*GetBackupScheduleRequest)(nil), // 4: google.spanner.admin.database.v1.GetBackupScheduleRequest
+ (*DeleteBackupScheduleRequest)(nil), // 5: google.spanner.admin.database.v1.DeleteBackupScheduleRequest
+ (*ListBackupSchedulesRequest)(nil), // 6: google.spanner.admin.database.v1.ListBackupSchedulesRequest
+ (*ListBackupSchedulesResponse)(nil), // 7: google.spanner.admin.database.v1.ListBackupSchedulesResponse
+ (*UpdateBackupScheduleRequest)(nil), // 8: google.spanner.admin.database.v1.UpdateBackupScheduleRequest
+ (*durationpb.Duration)(nil), // 9: google.protobuf.Duration
+ (*CreateBackupEncryptionConfig)(nil), // 10: google.spanner.admin.database.v1.CreateBackupEncryptionConfig
+ (*FullBackupSpec)(nil), // 11: google.spanner.admin.database.v1.FullBackupSpec
+ (*IncrementalBackupSpec)(nil), // 12: google.spanner.admin.database.v1.IncrementalBackupSpec
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+ (*fieldmaskpb.FieldMask)(nil), // 14: google.protobuf.FieldMask
+}
+var file_google_spanner_admin_database_v1_backup_schedule_proto_depIdxs = []int32{
+ 2, // 0: google.spanner.admin.database.v1.BackupScheduleSpec.cron_spec:type_name -> google.spanner.admin.database.v1.CrontabSpec
+ 0, // 1: google.spanner.admin.database.v1.BackupSchedule.spec:type_name -> google.spanner.admin.database.v1.BackupScheduleSpec
+ 9, // 2: google.spanner.admin.database.v1.BackupSchedule.retention_duration:type_name -> google.protobuf.Duration
+ 10, // 3: google.spanner.admin.database.v1.BackupSchedule.encryption_config:type_name -> google.spanner.admin.database.v1.CreateBackupEncryptionConfig
+ 11, // 4: google.spanner.admin.database.v1.BackupSchedule.full_backup_spec:type_name -> google.spanner.admin.database.v1.FullBackupSpec
+ 12, // 5: google.spanner.admin.database.v1.BackupSchedule.incremental_backup_spec:type_name -> google.spanner.admin.database.v1.IncrementalBackupSpec
+ 13, // 6: google.spanner.admin.database.v1.BackupSchedule.update_time:type_name -> google.protobuf.Timestamp
+ 9, // 7: google.spanner.admin.database.v1.CrontabSpec.creation_window:type_name -> google.protobuf.Duration
+ 1, // 8: google.spanner.admin.database.v1.CreateBackupScheduleRequest.backup_schedule:type_name -> google.spanner.admin.database.v1.BackupSchedule
+ 1, // 9: google.spanner.admin.database.v1.ListBackupSchedulesResponse.backup_schedules:type_name -> google.spanner.admin.database.v1.BackupSchedule
+ 1, // 10: google.spanner.admin.database.v1.UpdateBackupScheduleRequest.backup_schedule:type_name -> google.spanner.admin.database.v1.BackupSchedule
+ 14, // 11: google.spanner.admin.database.v1.UpdateBackupScheduleRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_google_spanner_admin_database_v1_backup_schedule_proto_init() }
+func file_google_spanner_admin_database_v1_backup_schedule_proto_init() {
+ if File_google_spanner_admin_database_v1_backup_schedule_proto != nil {
+ return
+ }
+ file_google_spanner_admin_database_v1_backup_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ switch v := v.(*BackupScheduleSpec); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ switch v := v.(*BackupSchedule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ switch v := v.(*CrontabSpec); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[3].Exporter = func(v any, i int) any {
+ switch v := v.(*CreateBackupScheduleRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[4].Exporter = func(v any, i int) any {
+ switch v := v.(*GetBackupScheduleRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[5].Exporter = func(v any, i int) any {
+ switch v := v.(*DeleteBackupScheduleRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[6].Exporter = func(v any, i int) any {
+ switch v := v.(*ListBackupSchedulesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[7].Exporter = func(v any, i int) any {
+ switch v := v.(*ListBackupSchedulesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[8].Exporter = func(v any, i int) any {
+ switch v := v.(*UpdateBackupScheduleRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0].OneofWrappers = []any{
+ (*BackupScheduleSpec_CronSpec)(nil),
+ }
+ file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1].OneofWrappers = []any{
+ (*BackupSchedule_FullBackupSpec)(nil),
+ (*BackupSchedule_IncrementalBackupSpec)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 9,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_spanner_admin_database_v1_backup_schedule_proto_goTypes,
+ DependencyIndexes: file_google_spanner_admin_database_v1_backup_schedule_proto_depIdxs,
+ MessageInfos: file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes,
+ }.Build()
+ File_google_spanner_admin_database_v1_backup_schedule_proto = out.File
+ file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc = nil
+ file_google_spanner_admin_database_v1_backup_schedule_proto_goTypes = nil
+ file_google_spanner_admin_database_v1_backup_schedule_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go
new file mode 100644
index 000000000..ba93d9a26
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go
@@ -0,0 +1,567 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
+// source: google/spanner/admin/database/v1/common.proto
+
+package databasepb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates the dialect type of a database.
+type DatabaseDialect int32
+
+const (
+ // Default value. This value will create a database with the
+ // GOOGLE_STANDARD_SQL dialect.
+ DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED DatabaseDialect = 0
+ // GoogleSQL supported SQL.
+ DatabaseDialect_GOOGLE_STANDARD_SQL DatabaseDialect = 1
+ // PostgreSQL supported SQL.
+ DatabaseDialect_POSTGRESQL DatabaseDialect = 2
+)
+
+// Enum value maps for DatabaseDialect.
+var (
+ DatabaseDialect_name = map[int32]string{
+ 0: "DATABASE_DIALECT_UNSPECIFIED",
+ 1: "GOOGLE_STANDARD_SQL",
+ 2: "POSTGRESQL",
+ }
+ DatabaseDialect_value = map[string]int32{
+ "DATABASE_DIALECT_UNSPECIFIED": 0,
+ "GOOGLE_STANDARD_SQL": 1,
+ "POSTGRESQL": 2,
+ }
+)
+
+func (x DatabaseDialect) Enum() *DatabaseDialect {
+ p := new(DatabaseDialect)
+ *p = x
+ return p
+}
+
+func (x DatabaseDialect) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (DatabaseDialect) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_database_v1_common_proto_enumTypes[0].Descriptor()
+}
+
+func (DatabaseDialect) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_database_v1_common_proto_enumTypes[0]
+}
+
+func (x DatabaseDialect) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use DatabaseDialect.Descriptor instead.
+func (DatabaseDialect) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+// Possible encryption types.
+type EncryptionInfo_Type int32
+
+const (
+ // Encryption type was not specified, though data at rest remains encrypted.
+ EncryptionInfo_TYPE_UNSPECIFIED EncryptionInfo_Type = 0
+ // The data is encrypted at rest with a key that is
+ // fully managed by Google. No key version or status will be populated.
+ // This is the default state.
+ EncryptionInfo_GOOGLE_DEFAULT_ENCRYPTION EncryptionInfo_Type = 1
+ // The data is encrypted at rest with a key that is
+ // managed by the customer. The active version of the key. `kms_key_version`
+ // will be populated, and `encryption_status` may be populated.
+ EncryptionInfo_CUSTOMER_MANAGED_ENCRYPTION EncryptionInfo_Type = 2
+)
+
+// Enum value maps for EncryptionInfo_Type.
+var (
+ EncryptionInfo_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "GOOGLE_DEFAULT_ENCRYPTION",
+ 2: "CUSTOMER_MANAGED_ENCRYPTION",
+ }
+ EncryptionInfo_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "GOOGLE_DEFAULT_ENCRYPTION": 1,
+ "CUSTOMER_MANAGED_ENCRYPTION": 2,
+ }
+)
+
+func (x EncryptionInfo_Type) Enum() *EncryptionInfo_Type {
+ p := new(EncryptionInfo_Type)
+ *p = x
+ return p
+}
+
+func (x EncryptionInfo_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (EncryptionInfo_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_database_v1_common_proto_enumTypes[1].Descriptor()
+}
+
+func (EncryptionInfo_Type) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_database_v1_common_proto_enumTypes[1]
+}
+
+func (x EncryptionInfo_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use EncryptionInfo_Type.Descriptor instead.
+func (EncryptionInfo_Type) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// Encapsulates progress related information for a Cloud Spanner long
+// running operation.
+type OperationProgress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Percent completion of the operation.
+ // Values are between 0 and 100 inclusive.
+ ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
+ // Time the request was received.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // If set, the time at which this operation failed or was completed
+ // successfully.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+}
+
+func (x *OperationProgress) Reset() {
+ *x = OperationProgress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OperationProgress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OperationProgress) ProtoMessage() {}
+
+func (x *OperationProgress) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OperationProgress.ProtoReflect.Descriptor instead.
+func (*OperationProgress) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OperationProgress) GetProgressPercent() int32 {
+ if x != nil {
+ return x.ProgressPercent
+ }
+ return 0
+}
+
+func (x *OperationProgress) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *OperationProgress) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+// Encryption configuration for a Cloud Spanner database.
+type EncryptionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The Cloud KMS key to be used for encrypting and decrypting
+ // the database. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
+ // Specifies the KMS configuration for the one or more keys used to encrypt
+ // the database. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ //
+ // The keys referenced by kms_key_names must fully cover all
+ // regions of the database instance configuration. Some examples:
+ // * For single region database instance configs, specify a single regional
+ // location KMS key.
+ // * For multi-regional database instance configs of type GOOGLE_MANAGED,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance config.
+ // * For a database instance config of type USER_MANAGED, please specify only
+ // regional location KMS keys to cover each region in the instance config.
+ // Multi-regional location KMS keys are not supported for USER_MANAGED
+ // instance configs.
+ KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
+}
+
+func (x *EncryptionConfig) Reset() {
+ *x = EncryptionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EncryptionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EncryptionConfig) ProtoMessage() {}
+
+func (x *EncryptionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EncryptionConfig.ProtoReflect.Descriptor instead.
+func (*EncryptionConfig) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EncryptionConfig) GetKmsKeyName() string {
+ if x != nil {
+ return x.KmsKeyName
+ }
+ return ""
+}
+
+func (x *EncryptionConfig) GetKmsKeyNames() []string {
+ if x != nil {
+ return x.KmsKeyNames
+ }
+ return nil
+}
+
+// Encryption information for a Cloud Spanner database or backup.
+type EncryptionInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The type of encryption.
+ EncryptionType EncryptionInfo_Type `protobuf:"varint,3,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.EncryptionInfo_Type" json:"encryption_type,omitempty"`
+ // Output only. If present, the status of a recent encrypt/decrypt call on
+ // underlying data for this database or backup. Regardless of status, data is
+ // always encrypted at rest.
+ EncryptionStatus *status.Status `protobuf:"bytes,4,opt,name=encryption_status,json=encryptionStatus,proto3" json:"encryption_status,omitempty"`
+ // Output only. A Cloud KMS key version that is being used to protect the
+ // database or backup.
+ KmsKeyVersion string `protobuf:"bytes,2,opt,name=kms_key_version,json=kmsKeyVersion,proto3" json:"kms_key_version,omitempty"`
+}
+
+func (x *EncryptionInfo) Reset() {
+ *x = EncryptionInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EncryptionInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EncryptionInfo) ProtoMessage() {}
+
+func (x *EncryptionInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EncryptionInfo.ProtoReflect.Descriptor instead.
+func (*EncryptionInfo) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *EncryptionInfo) GetEncryptionType() EncryptionInfo_Type {
+ if x != nil {
+ return x.EncryptionType
+ }
+ return EncryptionInfo_TYPE_UNSPECIFIED
+}
+
+func (x *EncryptionInfo) GetEncryptionStatus() *status.Status {
+ if x != nil {
+ return x.EncryptionStatus
+ }
+ return nil
+}
+
+func (x *EncryptionInfo) GetKmsKeyVersion() string {
+ if x != nil {
+ return x.KmsKeyVersion
+ }
+ return ""
+}
+
+var File_google_spanner_admin_database_v1_common_proto protoreflect.FileDescriptor
+
+var file_google_spanner_admin_database_v1_common_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb0, 0x01, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a,
+ 0x10, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73,
+ 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72,
+ 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xa8, 0x01, 0x0a, 0x10, 0x45,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b,
+ 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x6b, 0x6d, 0x73,
+ 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
+ 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf3, 0x02, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x63, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x6e, 0x66, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a,
+ 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41,
+ 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d,
+ 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47,
+ 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e,
+ 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55,
+ 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45,
+ 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x2a, 0x5c, 0x0a, 0x0f, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x20,
+ 0x0a, 0x1c, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x45,
+ 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x17, 0x0a, 0x13, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x4e, 0x44,
+ 0x41, 0x52, 0x44, 0x5f, 0x53, 0x51, 0x4c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x4f, 0x53,
+ 0x54, 0x47, 0x52, 0x45, 0x53, 0x51, 0x4c, 0x10, 0x02, 0x42, 0xa2, 0x04, 0xea, 0x41, 0x78, 0x0a,
+ 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b,
+ 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e,
+ 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x7d, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02,
+ 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41,
+ 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31,
+ 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+ 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_spanner_admin_database_v1_common_proto_rawDescOnce sync.Once
+ file_google_spanner_admin_database_v1_common_proto_rawDescData = file_google_spanner_admin_database_v1_common_proto_rawDesc
+)
+
+func file_google_spanner_admin_database_v1_common_proto_rawDescGZIP() []byte {
+ file_google_spanner_admin_database_v1_common_proto_rawDescOnce.Do(func() {
+ file_google_spanner_admin_database_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_common_proto_rawDescData)
+ })
+ return file_google_spanner_admin_database_v1_common_proto_rawDescData
+}
+
+var file_google_spanner_admin_database_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_spanner_admin_database_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_google_spanner_admin_database_v1_common_proto_goTypes = []any{
+ (DatabaseDialect)(0), // 0: google.spanner.admin.database.v1.DatabaseDialect
+ (EncryptionInfo_Type)(0), // 1: google.spanner.admin.database.v1.EncryptionInfo.Type
+ (*OperationProgress)(nil), // 2: google.spanner.admin.database.v1.OperationProgress
+ (*EncryptionConfig)(nil), // 3: google.spanner.admin.database.v1.EncryptionConfig
+ (*EncryptionInfo)(nil), // 4: google.spanner.admin.database.v1.EncryptionInfo
+ (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp
+ (*status.Status)(nil), // 6: google.rpc.Status
+}
+var file_google_spanner_admin_database_v1_common_proto_depIdxs = []int32{
+ 5, // 0: google.spanner.admin.database.v1.OperationProgress.start_time:type_name -> google.protobuf.Timestamp
+ 5, // 1: google.spanner.admin.database.v1.OperationProgress.end_time:type_name -> google.protobuf.Timestamp
+ 1, // 2: google.spanner.admin.database.v1.EncryptionInfo.encryption_type:type_name -> google.spanner.admin.database.v1.EncryptionInfo.Type
+ 6, // 3: google.spanner.admin.database.v1.EncryptionInfo.encryption_status:type_name -> google.rpc.Status
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_google_spanner_admin_database_v1_common_proto_init() }
+func file_google_spanner_admin_database_v1_common_proto_init() {
+ if File_google_spanner_admin_database_v1_common_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_spanner_admin_database_v1_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ switch v := v.(*OperationProgress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_common_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ switch v := v.(*EncryptionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_common_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ switch v := v.(*EncryptionInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_spanner_admin_database_v1_common_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_spanner_admin_database_v1_common_proto_goTypes,
+ DependencyIndexes: file_google_spanner_admin_database_v1_common_proto_depIdxs,
+ EnumInfos: file_google_spanner_admin_database_v1_common_proto_enumTypes,
+ MessageInfos: file_google_spanner_admin_database_v1_common_proto_msgTypes,
+ }.Build()
+ File_google_spanner_admin_database_v1_common_proto = out.File
+ file_google_spanner_admin_database_v1_common_proto_rawDesc = nil
+ file_google_spanner_admin_database_v1_common_proto_goTypes = nil
+ file_google_spanner_admin_database_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go
new file mode 100644
index 000000000..2be6d9793
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go
@@ -0,0 +1,4807 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
+// source: google/spanner/admin/database/v1/spanner_database_admin.proto
+
+package databasepb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ iampb "cloud.google.com/go/iam/apiv1/iampb"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates the type of the restore source.
+type RestoreSourceType int32
+
+const (
+ // No restore associated.
+ RestoreSourceType_TYPE_UNSPECIFIED RestoreSourceType = 0
+ // A backup was used as the source of the restore.
+ RestoreSourceType_BACKUP RestoreSourceType = 1
+)
+
+// Enum value maps for RestoreSourceType.
+var (
+ RestoreSourceType_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "BACKUP",
+ }
+ RestoreSourceType_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "BACKUP": 1,
+ }
+)
+
+func (x RestoreSourceType) Enum() *RestoreSourceType {
+ p := new(RestoreSourceType)
+ *p = x
+ return p
+}
+
+func (x RestoreSourceType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RestoreSourceType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[0].Descriptor()
+}
+
+func (RestoreSourceType) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[0]
+}
+
+func (x RestoreSourceType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RestoreSourceType.Descriptor instead.
+func (RestoreSourceType) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{0}
+}
+
+// Indicates the current state of the database.
+type Database_State int32
+
+const (
+ // Not specified.
+ Database_STATE_UNSPECIFIED Database_State = 0
+ // The database is still being created. Operations on the database may fail
+ // with `FAILED_PRECONDITION` in this state.
+ Database_CREATING Database_State = 1
+ // The database is fully created and ready for use.
+ Database_READY Database_State = 2
+ // The database is fully created and ready for use, but is still
+ // being optimized for performance and cannot handle full load.
+ //
+ // In this state, the database still references the backup
+ // it was restore from, preventing the backup
+ // from being deleted. When optimizations are complete, the full performance
+ // of the database will be restored, and the database will transition to
+ // `READY` state.
+ Database_READY_OPTIMIZING Database_State = 3
+)
+
+// Enum value maps for Database_State.
+var (
+ Database_State_name = map[int32]string{
+ 0: "STATE_UNSPECIFIED",
+ 1: "CREATING",
+ 2: "READY",
+ 3: "READY_OPTIMIZING",
+ }
+ Database_State_value = map[string]int32{
+ "STATE_UNSPECIFIED": 0,
+ "CREATING": 1,
+ "READY": 2,
+ "READY_OPTIMIZING": 3,
+ }
+)
+
+func (x Database_State) Enum() *Database_State {
+ p := new(Database_State)
+ *p = x
+ return p
+}
+
+func (x Database_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Database_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[1].Descriptor()
+}
+
+func (Database_State) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[1]
+}
+
+func (x Database_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Database_State.Descriptor instead.
+func (Database_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{1, 0}
+}
+
// Encryption types for the database to be restored.
type RestoreDatabaseEncryptionConfig_EncryptionType int32

const (
	// Unspecified. Do not use.
	RestoreDatabaseEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED RestoreDatabaseEncryptionConfig_EncryptionType = 0
	// This is the default option when
	// [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
	// is not specified.
	RestoreDatabaseEncryptionConfig_USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION RestoreDatabaseEncryptionConfig_EncryptionType = 1
	// Use Google default encryption.
	RestoreDatabaseEncryptionConfig_GOOGLE_DEFAULT_ENCRYPTION RestoreDatabaseEncryptionConfig_EncryptionType = 2
	// Use customer managed encryption. If specified, `kms_key_name` must
	// contain a valid Cloud KMS key.
	RestoreDatabaseEncryptionConfig_CUSTOMER_MANAGED_ENCRYPTION RestoreDatabaseEncryptionConfig_EncryptionType = 3
)
+
// Enum value maps for RestoreDatabaseEncryptionConfig_EncryptionType.
// The two maps are exact inverses of each other and are used by the
// protobuf runtime for name<->number translation.
var (
	RestoreDatabaseEncryptionConfig_EncryptionType_name = map[int32]string{
		0: "ENCRYPTION_TYPE_UNSPECIFIED",
		1: "USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION",
		2: "GOOGLE_DEFAULT_ENCRYPTION",
		3: "CUSTOMER_MANAGED_ENCRYPTION",
	}
	RestoreDatabaseEncryptionConfig_EncryptionType_value = map[string]int32{
		"ENCRYPTION_TYPE_UNSPECIFIED":             0,
		"USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION": 1,
		"GOOGLE_DEFAULT_ENCRYPTION":               2,
		"CUSTOMER_MANAGED_ENCRYPTION":             3,
	}
)

// Enum returns a pointer to a newly allocated copy of x.
func (x RestoreDatabaseEncryptionConfig_EncryptionType) Enum() *RestoreDatabaseEncryptionConfig_EncryptionType {
	p := new(RestoreDatabaseEncryptionConfig_EncryptionType)
	*p = x
	return p
}

// String returns the proto name of the enum value, resolved through the
// enum descriptor by the protobuf runtime.
func (x RestoreDatabaseEncryptionConfig_EncryptionType) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

// Descriptor returns the enum descriptor, taken from slot 2 of this
// file's generated enum-type table.
func (RestoreDatabaseEncryptionConfig_EncryptionType) Descriptor() protoreflect.EnumDescriptor {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[2].Descriptor()
}

// Type returns the reflective enum type backing this enum.
func (RestoreDatabaseEncryptionConfig_EncryptionType) Type() protoreflect.EnumType {
	return &file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[2]
}

// Number returns the wire value of x.
func (x RestoreDatabaseEncryptionConfig_EncryptionType) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use RestoreDatabaseEncryptionConfig_EncryptionType.Descriptor instead.
// The []int{18, 0} is the path of this enum within the raw file descriptor.
func (RestoreDatabaseEncryptionConfig_EncryptionType) EnumDescriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{18, 0}
}
+
// Information about the database restore.
type RestoreInfo struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The type of the restore source.
	SourceType RestoreSourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=google.spanner.admin.database.v1.RestoreSourceType" json:"source_type,omitempty"`
	// Information about the source used to restore the database.
	//
	// Types that are assignable to SourceInfo:
	//
	//	*RestoreInfo_BackupInfo
	SourceInfo isRestoreInfo_SourceInfo `protobuf_oneof:"source_info"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *RestoreInfo) Reset() {
	*x = RestoreInfo{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *RestoreInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *RestoreInfo as a protobuf message.
func (*RestoreInfo) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *RestoreInfo) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RestoreInfo.ProtoReflect.Descriptor instead.
func (*RestoreInfo) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{0}
}

// GetSourceType returns the restore source type; nil-receiver safe,
// returning the enum zero value in that case.
func (x *RestoreInfo) GetSourceType() RestoreSourceType {
	if x != nil {
		return x.SourceType
	}
	return RestoreSourceType_TYPE_UNSPECIFIED
}

// GetSourceInfo returns the populated source_info oneof wrapper, or nil.
func (m *RestoreInfo) GetSourceInfo() isRestoreInfo_SourceInfo {
	if m != nil {
		return m.SourceInfo
	}
	return nil
}

// GetBackupInfo returns the backup info if the source_info oneof holds
// it, and nil otherwise.
func (x *RestoreInfo) GetBackupInfo() *BackupInfo {
	if x, ok := x.GetSourceInfo().(*RestoreInfo_BackupInfo); ok {
		return x.BackupInfo
	}
	return nil
}

// isRestoreInfo_SourceInfo is satisfied by every wrapper type of the
// source_info oneof.
type isRestoreInfo_SourceInfo interface {
	isRestoreInfo_SourceInfo()
}

type RestoreInfo_BackupInfo struct {
	// Information about the backup used to restore the database. The backup
	// may no longer exist.
	BackupInfo *BackupInfo `protobuf:"bytes,2,opt,name=backup_info,json=backupInfo,proto3,oneof"`
}

func (*RestoreInfo_BackupInfo) isRestoreInfo_SourceInfo() {}
+
// A Cloud Spanner database.
type Database struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The name of the database. Values are of the form
	// `projects/<project>/instances/<instance>/databases/<database>`,
	// where `<database>` is as specified in the `CREATE DATABASE`
	// statement. This name can be passed to other API methods to
	// identify the database.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Output only. The current database state.
	State Database_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.spanner.admin.database.v1.Database_State" json:"state,omitempty"`
	// Output only. If exists, the time at which the database creation started.
	CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Output only. Applicable only for restored databases. Contains information
	// about the restore source.
	RestoreInfo *RestoreInfo `protobuf:"bytes,4,opt,name=restore_info,json=restoreInfo,proto3" json:"restore_info,omitempty"`
	// Output only. For databases that are using customer managed encryption, this
	// field contains the encryption configuration for the database.
	// For databases that are using Google default or other types of encryption,
	// this field is empty.
	EncryptionConfig *EncryptionConfig `protobuf:"bytes,5,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
	// Output only. For databases that are using customer managed encryption, this
	// field contains the encryption information for the database, such as
	// all Cloud KMS key versions that are in use. The `encryption_status` field
	// inside of each `EncryptionInfo` is not populated.
	//
	// For databases that are using Google default or other types of encryption,
	// this field is empty.
	//
	// This field is propagated lazily from the backend. There might be a delay
	// from when a key version is being used and when it appears in this field.
	EncryptionInfo []*EncryptionInfo `protobuf:"bytes,8,rep,name=encryption_info,json=encryptionInfo,proto3" json:"encryption_info,omitempty"`
	// Output only. The period in which Cloud Spanner retains all versions of data
	// for the database. This is the same as the value of version_retention_period
	// database option set using
	// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
	// Defaults to 1 hour, if not set.
	VersionRetentionPeriod string `protobuf:"bytes,6,opt,name=version_retention_period,json=versionRetentionPeriod,proto3" json:"version_retention_period,omitempty"`
	// Output only. Earliest timestamp at which older versions of the data can be
	// read. This value is continuously updated by Cloud Spanner and becomes stale
	// the moment it is queried. If you are using this value to recover data, make
	// sure to account for the time from the moment when the value is queried to
	// the moment when you initiate the recovery.
	EarliestVersionTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=earliest_version_time,json=earliestVersionTime,proto3" json:"earliest_version_time,omitempty"`
	// Output only. The read-write region which contains the database's leader
	// replicas.
	//
	// This is the same as the value of default_leader
	// database option set using DatabaseAdmin.CreateDatabase or
	// DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
	DefaultLeader string `protobuf:"bytes,9,opt,name=default_leader,json=defaultLeader,proto3" json:"default_leader,omitempty"`
	// Output only. The dialect of the Cloud Spanner Database.
	DatabaseDialect DatabaseDialect `protobuf:"varint,10,opt,name=database_dialect,json=databaseDialect,proto3,enum=google.spanner.admin.database.v1.DatabaseDialect" json:"database_dialect,omitempty"`
	// Whether drop protection is enabled for this database. Defaults to false,
	// if not set. For more details, please see how to [prevent accidental
	// database
	// deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
	EnableDropProtection bool `protobuf:"varint,11,opt,name=enable_drop_protection,json=enableDropProtection,proto3" json:"enable_drop_protection,omitempty"`
	// Output only. If true, the database is being updated. If false, there are no
	// ongoing update operations for the database.
	Reconciling bool `protobuf:"varint,12,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *Database) Reset() {
	*x = Database{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *Database) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *Database as a protobuf message.
func (*Database) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *Database) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Database.ProtoReflect.Descriptor instead.
func (*Database) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{1}
}

// All getters below are nil-receiver safe and return the field's zero
// value when x is nil.
func (x *Database) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *Database) GetState() Database_State {
	if x != nil {
		return x.State
	}
	return Database_STATE_UNSPECIFIED
}

func (x *Database) GetCreateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CreateTime
	}
	return nil
}

func (x *Database) GetRestoreInfo() *RestoreInfo {
	if x != nil {
		return x.RestoreInfo
	}
	return nil
}

func (x *Database) GetEncryptionConfig() *EncryptionConfig {
	if x != nil {
		return x.EncryptionConfig
	}
	return nil
}

func (x *Database) GetEncryptionInfo() []*EncryptionInfo {
	if x != nil {
		return x.EncryptionInfo
	}
	return nil
}

func (x *Database) GetVersionRetentionPeriod() string {
	if x != nil {
		return x.VersionRetentionPeriod
	}
	return ""
}

func (x *Database) GetEarliestVersionTime() *timestamppb.Timestamp {
	if x != nil {
		return x.EarliestVersionTime
	}
	return nil
}

func (x *Database) GetDefaultLeader() string {
	if x != nil {
		return x.DefaultLeader
	}
	return ""
}

func (x *Database) GetDatabaseDialect() DatabaseDialect {
	if x != nil {
		return x.DatabaseDialect
	}
	return DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED
}

func (x *Database) GetEnableDropProtection() bool {
	if x != nil {
		return x.EnableDropProtection
	}
	return false
}

func (x *Database) GetReconciling() bool {
	if x != nil {
		return x.Reconciling
	}
	return false
}
+
// The request for
// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
type ListDatabasesRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The instance whose databases should be listed.
	// Values are of the form `projects/<project>/instances/<instance>`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Number of databases to be returned in the response. If 0 or less,
	// defaults to the server's maximum allowed page size.
	PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// If non-empty, `page_token` should contain a
	// [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
	// from a previous
	// [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
	PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *ListDatabasesRequest) Reset() {
	*x = ListDatabasesRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *ListDatabasesRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *ListDatabasesRequest as a protobuf message.
func (*ListDatabasesRequest) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *ListDatabasesRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListDatabasesRequest.ProtoReflect.Descriptor instead.
func (*ListDatabasesRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{2}
}

// All getters below are nil-receiver safe and return the field's zero
// value when x is nil.
func (x *ListDatabasesRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

func (x *ListDatabasesRequest) GetPageSize() int32 {
	if x != nil {
		return x.PageSize
	}
	return 0
}

func (x *ListDatabasesRequest) GetPageToken() string {
	if x != nil {
		return x.PageToken
	}
	return ""
}
+
// The response for
// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
type ListDatabasesResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Databases that matched the request.
	Databases []*Database `protobuf:"bytes,1,rep,name=databases,proto3" json:"databases,omitempty"`
	// `next_page_token` can be sent in a subsequent
	// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
	// call to fetch more of the matching databases.
	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *ListDatabasesResponse) Reset() {
	*x = ListDatabasesResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *ListDatabasesResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *ListDatabasesResponse as a protobuf message.
func (*ListDatabasesResponse) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *ListDatabasesResponse) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ListDatabasesResponse.ProtoReflect.Descriptor instead.
func (*ListDatabasesResponse) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{3}
}

// All getters below are nil-receiver safe and return the field's zero
// value when x is nil.
func (x *ListDatabasesResponse) GetDatabases() []*Database {
	if x != nil {
		return x.Databases
	}
	return nil
}

func (x *ListDatabasesResponse) GetNextPageToken() string {
	if x != nil {
		return x.NextPageToken
	}
	return ""
}
+
// The request for
// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
type CreateDatabaseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The name of the instance that will serve the new database.
	// Values are of the form `projects/<project>/instances/<instance>`.
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. A `CREATE DATABASE` statement, which specifies the ID of the
	// new database. The database ID must conform to the regular expression
	// `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
	// If the database ID is a reserved word or if it contains a hyphen, the
	// database ID must be enclosed in backticks (`` ` ``).
	CreateStatement string `protobuf:"bytes,2,opt,name=create_statement,json=createStatement,proto3" json:"create_statement,omitempty"`
	// Optional. A list of DDL statements to run inside the newly created
	// database. Statements can create tables, indexes, etc. These
	// statements execute atomically with the creation of the database:
	// if there is an error in any statement, the database is not created.
	ExtraStatements []string `protobuf:"bytes,3,rep,name=extra_statements,json=extraStatements,proto3" json:"extra_statements,omitempty"`
	// Optional. The encryption configuration for the database. If this field is
	// not specified, Cloud Spanner will encrypt/decrypt all data at rest using
	// Google default encryption.
	EncryptionConfig *EncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
	// Optional. The dialect of the Cloud Spanner Database.
	DatabaseDialect DatabaseDialect `protobuf:"varint,5,opt,name=database_dialect,json=databaseDialect,proto3,enum=google.spanner.admin.database.v1.DatabaseDialect" json:"database_dialect,omitempty"`
	// Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
	// 'extra_statements' above.
	// Contains a protobuf-serialized
	// [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
	// To generate it, [install](https://grpc.io/docs/protoc-installation/) and
	// run `protoc` with --include_imports and --descriptor_set_out. For example,
	// to generate for moon/shot/app.proto, run
	// ```
	//
	//	$protoc --proto_path=/app_path --proto_path=/lib_path \
	//	        --include_imports \
	//	        --descriptor_set_out=descriptors.data \
	//	        moon/shot/app.proto
	//
	// ```
	// For more details, see protobuffer [self
	// description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
	ProtoDescriptors []byte `protobuf:"bytes,6,opt,name=proto_descriptors,json=protoDescriptors,proto3" json:"proto_descriptors,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *CreateDatabaseRequest) Reset() {
	*x = CreateDatabaseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *CreateDatabaseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *CreateDatabaseRequest as a protobuf message.
func (*CreateDatabaseRequest) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *CreateDatabaseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateDatabaseRequest.ProtoReflect.Descriptor instead.
func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{4}
}

// All getters below are nil-receiver safe and return the field's zero
// value when x is nil.
func (x *CreateDatabaseRequest) GetParent() string {
	if x != nil {
		return x.Parent
	}
	return ""
}

func (x *CreateDatabaseRequest) GetCreateStatement() string {
	if x != nil {
		return x.CreateStatement
	}
	return ""
}

func (x *CreateDatabaseRequest) GetExtraStatements() []string {
	if x != nil {
		return x.ExtraStatements
	}
	return nil
}

func (x *CreateDatabaseRequest) GetEncryptionConfig() *EncryptionConfig {
	if x != nil {
		return x.EncryptionConfig
	}
	return nil
}

func (x *CreateDatabaseRequest) GetDatabaseDialect() DatabaseDialect {
	if x != nil {
		return x.DatabaseDialect
	}
	return DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED
}

func (x *CreateDatabaseRequest) GetProtoDescriptors() []byte {
	if x != nil {
		return x.ProtoDescriptors
	}
	return nil
}
+
// Metadata type for the operation returned by
// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
type CreateDatabaseMetadata struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The database being created.
	Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *CreateDatabaseMetadata) Reset() {
	*x = CreateDatabaseMetadata{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *CreateDatabaseMetadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *CreateDatabaseMetadata as a protobuf message.
func (*CreateDatabaseMetadata) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *CreateDatabaseMetadata) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use CreateDatabaseMetadata.ProtoReflect.Descriptor instead.
func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{5}
}

// GetDatabase is nil-receiver safe and returns "" when x is nil.
func (x *CreateDatabaseMetadata) GetDatabase() string {
	if x != nil {
		return x.Database
	}
	return ""
}
+
// The request for
// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
type GetDatabaseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The name of the requested database. Values are of the form
	// `projects/<project>/instances/<instance>/databases/<database>`.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *GetDatabaseRequest) Reset() {
	*x = GetDatabaseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *GetDatabaseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *GetDatabaseRequest as a protobuf message.
func (*GetDatabaseRequest) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *GetDatabaseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetDatabaseRequest.ProtoReflect.Descriptor instead.
func (*GetDatabaseRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{6}
}

// GetName is nil-receiver safe and returns "" when x is nil.
func (x *GetDatabaseRequest) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
+
// The request for
// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
type UpdateDatabaseRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The database to update.
	// The `name` field of the database is of the form
	// `projects/<project>/instances/<instance>/databases/<database>`.
	Database *Database `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
	// Required. The list of fields to update. Currently, only
	// `enable_drop_protection` field can be updated.
	UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *UpdateDatabaseRequest) Reset() {
	*x = UpdateDatabaseRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *UpdateDatabaseRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *UpdateDatabaseRequest as a protobuf message.
func (*UpdateDatabaseRequest) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *UpdateDatabaseRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateDatabaseRequest.ProtoReflect.Descriptor instead.
func (*UpdateDatabaseRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{7}
}

// All getters below are nil-receiver safe and return nil when x is nil.
func (x *UpdateDatabaseRequest) GetDatabase() *Database {
	if x != nil {
		return x.Database
	}
	return nil
}

func (x *UpdateDatabaseRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
	if x != nil {
		return x.UpdateMask
	}
	return nil
}
+
// Metadata type for the operation returned by
// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
type UpdateDatabaseMetadata struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The request for
	// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
	Request *UpdateDatabaseRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"`
	// The progress of the
	// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
	// operation.
	Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
	// The time at which this operation was cancelled. If set, this operation is
	// in the process of undoing itself (which is best-effort).
	CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *UpdateDatabaseMetadata) Reset() {
	*x = UpdateDatabaseMetadata{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *UpdateDatabaseMetadata) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *UpdateDatabaseMetadata as a protobuf message.
func (*UpdateDatabaseMetadata) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *UpdateDatabaseMetadata) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateDatabaseMetadata.ProtoReflect.Descriptor instead.
func (*UpdateDatabaseMetadata) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{8}
}

// All getters below are nil-receiver safe and return nil when x is nil.
func (x *UpdateDatabaseMetadata) GetRequest() *UpdateDatabaseRequest {
	if x != nil {
		return x.Request
	}
	return nil
}

func (x *UpdateDatabaseMetadata) GetProgress() *OperationProgress {
	if x != nil {
		return x.Progress
	}
	return nil
}

func (x *UpdateDatabaseMetadata) GetCancelTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CancelTime
	}
	return nil
}
+
// Enqueues the given DDL statements to be applied, in order but not
// necessarily all at once, to the database schema at some point (or
// points) in the future. The server checks that the statements
// are executable (syntactically valid, name tables that exist, etc.)
// before enqueueing them, but they may still fail upon
// later execution (e.g., if a statement from another batch of
// statements is applied first and it conflicts in some way, or if
// there is some data-related problem like a `NULL` value in a column to
// which `NOT NULL` would be added). If a statement fails, all
// subsequent statements in the batch are automatically cancelled.
//
// Each batch of statements is assigned a name which can be used with
// the [Operations][google.longrunning.Operations] API to monitor
// progress. See the
// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
// field for more details.
type UpdateDatabaseDdlRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Required. The database to update.
	Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
	// Required. DDL statements to be applied to the database.
	Statements []string `protobuf:"bytes,2,rep,name=statements,proto3" json:"statements,omitempty"`
	// If empty, the new update request is assigned an
	// automatically-generated operation ID. Otherwise, `operation_id`
	// is used to construct the name of the resulting
	// [Operation][google.longrunning.Operation].
	//
	// Specifying an explicit operation ID simplifies determining
	// whether the statements were executed in the event that the
	// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
	// call is replayed, or the return value is otherwise lost: the
	// [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
	// and `operation_id` fields can be combined to form the
	// [name][google.longrunning.Operation.name] of the resulting
	// [longrunning.Operation][google.longrunning.Operation]:
	// `<database>/operations/<operation_id>`.
	//
	// `operation_id` should be unique within the database, and must be
	// a valid identifier: `[a-z][a-z0-9_]*`. Note that
	// automatically-generated operation IDs always begin with an
	// underscore. If the named operation already exists,
	// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
	// returns `ALREADY_EXISTS`.
	OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
	// Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
	// Contains a protobuf-serialized
	// [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
	// To generate it, [install](https://grpc.io/docs/protoc-installation/) and
	// run `protoc` with --include_imports and --descriptor_set_out. For example,
	// to generate for moon/shot/app.proto, run
	// ```
	//
	//	$protoc --proto_path=/app_path --proto_path=/lib_path \
	//	        --include_imports \
	//	        --descriptor_set_out=descriptors.data \
	//	        moon/shot/app.proto
	//
	// ```
	// For more details, see protobuffer [self
	// description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
	ProtoDescriptors []byte `protobuf:"bytes,4,opt,name=proto_descriptors,json=protoDescriptors,proto3" json:"proto_descriptors,omitempty"`
}

// Reset clears x to its zero state; when the protoimpl unsafe fast path
// is enabled, it also stores the message type info on the fresh state.
func (x *UpdateDatabaseDdlRequest) Reset() {
	*x = UpdateDatabaseDdlRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message, produced by
// the protobuf runtime.
func (x *UpdateDatabaseDdlRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies *UpdateDatabaseDdlRequest as a protobuf message.
func (*UpdateDatabaseDdlRequest) ProtoMessage() {}

// ProtoReflect returns the reflection interface for x; the message type
// info is attached lazily on first use under the unsafe fast path.
func (x *UpdateDatabaseDdlRequest) ProtoReflect() protoreflect.Message {
	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use UpdateDatabaseDdlRequest.ProtoReflect.Descriptor instead.
func (*UpdateDatabaseDdlRequest) Descriptor() ([]byte, []int) {
	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{9}
}

// All getters below are nil-receiver safe and return the field's zero
// value when x is nil.
func (x *UpdateDatabaseDdlRequest) GetDatabase() string {
	if x != nil {
		return x.Database
	}
	return ""
}

func (x *UpdateDatabaseDdlRequest) GetStatements() []string {
	if x != nil {
		return x.Statements
	}
	return nil
}

func (x *UpdateDatabaseDdlRequest) GetOperationId() string {
	if x != nil {
		return x.OperationId
	}
	return ""
}

func (x *UpdateDatabaseDdlRequest) GetProtoDescriptors() []byte {
	if x != nil {
		return x.ProtoDescriptors
	}
	return nil
}
+
+// Action information extracted from a DDL statement. This proto is used to
+// display the brief info of the DDL statement for the operation
+// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+type DdlStatementActionInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
+ // This field is a non-empty string.
+ Action string `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
+ // The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
+ // This field can be empty string for some DDL statement,
+ // e.g. for statement "ANALYZE", `entity_type` = "".
+ EntityType string `protobuf:"bytes,2,opt,name=entity_type,json=entityType,proto3" json:"entity_type,omitempty"`
+ // The entity name(s) being operated on the DDL statement.
+ // E.g.
+ // 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
+ // 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
+ // 3. For statement "ANALYZE", `entity_names` = [].
+ EntityNames []string `protobuf:"bytes,3,rep,name=entity_names,json=entityNames,proto3" json:"entity_names,omitempty"`
+}
+
+func (x *DdlStatementActionInfo) Reset() {
+ *x = DdlStatementActionInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DdlStatementActionInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DdlStatementActionInfo) ProtoMessage() {}
+
+func (x *DdlStatementActionInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DdlStatementActionInfo.ProtoReflect.Descriptor instead.
+func (*DdlStatementActionInfo) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *DdlStatementActionInfo) GetAction() string {
+ if x != nil {
+ return x.Action
+ }
+ return ""
+}
+
+func (x *DdlStatementActionInfo) GetEntityType() string {
+ if x != nil {
+ return x.EntityType
+ }
+ return ""
+}
+
+func (x *DdlStatementActionInfo) GetEntityNames() []string {
+ if x != nil {
+ return x.EntityNames
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
+type UpdateDatabaseDdlMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The database being modified.
+ Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
+ // For an update this list contains all the statements. For an
+ // individual statement, this list contains only that statement.
+ Statements []string `protobuf:"bytes,2,rep,name=statements,proto3" json:"statements,omitempty"`
+ // Reports the commit timestamps of all statements that have
+ // succeeded so far, where `commit_timestamps[i]` is the commit
+ // timestamp for the statement `statements[i]`.
+ CommitTimestamps []*timestamppb.Timestamp `protobuf:"bytes,3,rep,name=commit_timestamps,json=commitTimestamps,proto3" json:"commit_timestamps,omitempty"`
+ // Output only. When true, indicates that the operation is throttled e.g.
+ // due to resource constraints. When resources become available the operation
+ // will resume and this field will be false again.
+ Throttled bool `protobuf:"varint,4,opt,name=throttled,proto3" json:"throttled,omitempty"`
+ // The progress of the
+ // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
+ // operations. All DDL statements will have continuously updating progress,
+ // and `progress[i]` is the operation progress for `statements[i]`. Also,
+ // `progress[i]` will have start time and end time populated with commit
+ // timestamp of operation, as well as a progress of 100% once the operation
+ // has completed.
+ Progress []*OperationProgress `protobuf:"bytes,5,rep,name=progress,proto3" json:"progress,omitempty"`
+ // The brief action info for the DDL statements.
+ // `actions[i]` is the brief info for `statements[i]`.
+ Actions []*DdlStatementActionInfo `protobuf:"bytes,6,rep,name=actions,proto3" json:"actions,omitempty"`
+}
+
+func (x *UpdateDatabaseDdlMetadata) Reset() {
+ *x = UpdateDatabaseDdlMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateDatabaseDdlMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateDatabaseDdlMetadata) ProtoMessage() {}
+
+func (x *UpdateDatabaseDdlMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateDatabaseDdlMetadata.ProtoReflect.Descriptor instead.
+func (*UpdateDatabaseDdlMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *UpdateDatabaseDdlMetadata) GetDatabase() string {
+ if x != nil {
+ return x.Database
+ }
+ return ""
+}
+
+func (x *UpdateDatabaseDdlMetadata) GetStatements() []string {
+ if x != nil {
+ return x.Statements
+ }
+ return nil
+}
+
+func (x *UpdateDatabaseDdlMetadata) GetCommitTimestamps() []*timestamppb.Timestamp {
+ if x != nil {
+ return x.CommitTimestamps
+ }
+ return nil
+}
+
+func (x *UpdateDatabaseDdlMetadata) GetThrottled() bool {
+ if x != nil {
+ return x.Throttled
+ }
+ return false
+}
+
+func (x *UpdateDatabaseDdlMetadata) GetProgress() []*OperationProgress {
+ if x != nil {
+ return x.Progress
+ }
+ return nil
+}
+
+func (x *UpdateDatabaseDdlMetadata) GetActions() []*DdlStatementActionInfo {
+ if x != nil {
+ return x.Actions
+ }
+ return nil
+}
+
+// The request for
+// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
+type DropDatabaseRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The database to be dropped.
+ Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
+}
+
+func (x *DropDatabaseRequest) Reset() {
+ *x = DropDatabaseRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DropDatabaseRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DropDatabaseRequest) ProtoMessage() {}
+
+func (x *DropDatabaseRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DropDatabaseRequest.ProtoReflect.Descriptor instead.
+func (*DropDatabaseRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *DropDatabaseRequest) GetDatabase() string {
+ if x != nil {
+ return x.Database
+ }
+ return ""
+}
+
+// The request for
+// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+type GetDatabaseDdlRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The database whose schema we wish to get.
+ // Values are of the form
+ // `projects/<project>/instances/<instance>/databases/<database>`
+ Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
+}
+
+func (x *GetDatabaseDdlRequest) Reset() {
+ *x = GetDatabaseDdlRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetDatabaseDdlRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetDatabaseDdlRequest) ProtoMessage() {}
+
+func (x *GetDatabaseDdlRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetDatabaseDdlRequest.ProtoReflect.Descriptor instead.
+func (*GetDatabaseDdlRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *GetDatabaseDdlRequest) GetDatabase() string {
+ if x != nil {
+ return x.Database
+ }
+ return ""
+}
+
+// The response for
+// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
+type GetDatabaseDdlResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A list of formatted DDL statements defining the schema of the database
+ // specified in the request.
+ Statements []string `protobuf:"bytes,1,rep,name=statements,proto3" json:"statements,omitempty"`
+ // Proto descriptors stored in the database.
+ // Contains a protobuf-serialized
+ // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
+ // For more details, see protobuffer [self
+ // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
+ ProtoDescriptors []byte `protobuf:"bytes,2,opt,name=proto_descriptors,json=protoDescriptors,proto3" json:"proto_descriptors,omitempty"`
+}
+
+func (x *GetDatabaseDdlResponse) Reset() {
+ *x = GetDatabaseDdlResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetDatabaseDdlResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetDatabaseDdlResponse) ProtoMessage() {}
+
+func (x *GetDatabaseDdlResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetDatabaseDdlResponse.ProtoReflect.Descriptor instead.
+func (*GetDatabaseDdlResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *GetDatabaseDdlResponse) GetStatements() []string {
+ if x != nil {
+ return x.Statements
+ }
+ return nil
+}
+
+func (x *GetDatabaseDdlResponse) GetProtoDescriptors() []byte {
+ if x != nil {
+ return x.ProtoDescriptors
+ }
+ return nil
+}
+
+// The request for
+// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+type ListDatabaseOperationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The instance of the database operations.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // An expression that filters the list of returned operations.
+ //
+ // A filter expression consists of a field name, a
+ // comparison operator, and a value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the [Operation][google.longrunning.Operation]
+ // are eligible for filtering:
+ //
+ // - `name` - The name of the long-running operation
+ // - `done` - False if the operation is in progress, else true.
+ // - `metadata.@type` - the type of metadata. For example, the type string
+ // for
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
+ // is
+ // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
+ // - `metadata.<field_name>` - any field in metadata.value.
+ // `metadata.@type` must be specified first, if filtering on metadata
+ // fields.
+ // - `error` - Error associated with the long-running operation.
+ // - `response.@type` - the type of response.
+ // - `response.<field_name>` - any field in response.value.
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic. However,
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // - `done:true` - The operation is complete.
+ // - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
+ // `(metadata.source_type:BACKUP) AND` \
+ // `(metadata.backup_info.backup:backup_howl) AND` \
+ // `(metadata.name:restored_howl) AND` \
+ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
+ // `(error:*)` - Return operations where:
+ // - The operation's metadata type is
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ // - The database is restored from a backup.
+ // - The backup name contains "backup_howl".
+ // - The restored database's name contains "restored_howl".
+ // - The operation started before 2018-03-28T14:50:00Z.
+ // - The operation resulted in an error.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Number of operations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
+ // from a previous
+ // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
+ // to the same `parent` and with the same `filter`.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListDatabaseOperationsRequest) Reset() {
+ *x = ListDatabaseOperationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListDatabaseOperationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListDatabaseOperationsRequest) ProtoMessage() {}
+
+func (x *ListDatabaseOperationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListDatabaseOperationsRequest.ProtoReflect.Descriptor instead.
+func (*ListDatabaseOperationsRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *ListDatabaseOperationsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListDatabaseOperationsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListDatabaseOperationsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListDatabaseOperationsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The response for
+// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
+type ListDatabaseOperationsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of matching database [long-running
+ // operations][google.longrunning.Operation]. Each operation's name will be
+ // prefixed by the database's name. The operation's
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata.
+ Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
+ // `next_page_token` can be sent in a subsequent
+ // [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
+ // call to fetch more of the matching metadata.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListDatabaseOperationsResponse) Reset() {
+ *x = ListDatabaseOperationsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListDatabaseOperationsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListDatabaseOperationsResponse) ProtoMessage() {}
+
+func (x *ListDatabaseOperationsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListDatabaseOperationsResponse.ProtoReflect.Descriptor instead.
+func (*ListDatabaseOperationsResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *ListDatabaseOperationsResponse) GetOperations() []*longrunningpb.Operation {
+ if x != nil {
+ return x.Operations
+ }
+ return nil
+}
+
+func (x *ListDatabaseOperationsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The request for
+// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+type RestoreDatabaseRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the instance in which to create the
+ // restored database. This instance must be in the same project and
+ // have the same instance configuration as the instance containing
+ // the source backup. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The id of the database to create and restore to. This
+ // database must not already exist. The `database_id` appended to
+ // `parent` forms the full database name of the form
+ // `projects/<project>/instances/<instance>/databases/<database_id>`.
+ DatabaseId string `protobuf:"bytes,2,opt,name=database_id,json=databaseId,proto3" json:"database_id,omitempty"`
+ // Required. The source from which to restore.
+ //
+ // Types that are assignable to Source:
+ //
+ // *RestoreDatabaseRequest_Backup
+ Source isRestoreDatabaseRequest_Source `protobuf_oneof:"source"`
+ // Optional. An encryption configuration describing the encryption type and
+ // key resources in Cloud KMS used to encrypt/decrypt the database to restore
+ // to. If this field is not specified, the restored database will use the same
+ // encryption configuration as the backup by default, namely
+ // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
+ EncryptionConfig *RestoreDatabaseEncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
+}
+
+func (x *RestoreDatabaseRequest) Reset() {
+ *x = RestoreDatabaseRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RestoreDatabaseRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RestoreDatabaseRequest) ProtoMessage() {}
+
+func (x *RestoreDatabaseRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RestoreDatabaseRequest.ProtoReflect.Descriptor instead.
+func (*RestoreDatabaseRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *RestoreDatabaseRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *RestoreDatabaseRequest) GetDatabaseId() string {
+ if x != nil {
+ return x.DatabaseId
+ }
+ return ""
+}
+
+func (m *RestoreDatabaseRequest) GetSource() isRestoreDatabaseRequest_Source {
+ if m != nil {
+ return m.Source
+ }
+ return nil
+}
+
+func (x *RestoreDatabaseRequest) GetBackup() string {
+ if x, ok := x.GetSource().(*RestoreDatabaseRequest_Backup); ok {
+ return x.Backup
+ }
+ return ""
+}
+
+func (x *RestoreDatabaseRequest) GetEncryptionConfig() *RestoreDatabaseEncryptionConfig {
+ if x != nil {
+ return x.EncryptionConfig
+ }
+ return nil
+}
+
+type isRestoreDatabaseRequest_Source interface {
+ isRestoreDatabaseRequest_Source()
+}
+
+type RestoreDatabaseRequest_Backup struct {
+ // Name of the backup from which to restore. Values are of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>`.
+ Backup string `protobuf:"bytes,3,opt,name=backup,proto3,oneof"`
+}
+
+func (*RestoreDatabaseRequest_Backup) isRestoreDatabaseRequest_Source() {}
+
+// Encryption configuration for the restored database.
+type RestoreDatabaseEncryptionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The encryption type of the restored database.
+ EncryptionType RestoreDatabaseEncryptionConfig_EncryptionType `protobuf:"varint,1,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig_EncryptionType" json:"encryption_type,omitempty"`
+ // Optional. The Cloud KMS key that will be used to encrypt/decrypt the
+ // restored database. This field should be set only when
+ // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
+ // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
+ // Optional. Specifies the KMS configuration for the one or more keys used to
+ // encrypt the database. Values are of the form
+ // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
+ //
+ // The keys referenced by kms_key_names must fully cover all
+ // regions of the database instance configuration. Some examples:
+ // * For single region database instance configs, specify a single regional
+ // location KMS key.
+ // * For multi-regional database instance configs of type GOOGLE_MANAGED,
+ // either specify a multi-regional location KMS key or multiple regional
+ // location KMS keys that cover all regions in the instance config.
+ // * For a database instance config of type USER_MANAGED, please specify only
+ // regional location KMS keys to cover each region in the instance config.
+ // Multi-regional location KMS keys are not supported for USER_MANAGED
+ // instance configs.
+ KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
+}
+
+func (x *RestoreDatabaseEncryptionConfig) Reset() {
+ *x = RestoreDatabaseEncryptionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RestoreDatabaseEncryptionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RestoreDatabaseEncryptionConfig) ProtoMessage() {}
+
+func (x *RestoreDatabaseEncryptionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RestoreDatabaseEncryptionConfig.ProtoReflect.Descriptor instead.
+func (*RestoreDatabaseEncryptionConfig) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *RestoreDatabaseEncryptionConfig) GetEncryptionType() RestoreDatabaseEncryptionConfig_EncryptionType {
+ if x != nil {
+ return x.EncryptionType
+ }
+ return RestoreDatabaseEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED
+}
+
+func (x *RestoreDatabaseEncryptionConfig) GetKmsKeyName() string {
+ if x != nil {
+ return x.KmsKeyName
+ }
+ return ""
+}
+
+func (x *RestoreDatabaseEncryptionConfig) GetKmsKeyNames() []string {
+ if x != nil {
+ return x.KmsKeyNames
+ }
+ return nil
+}
+
+// Metadata type for the long-running operation returned by
+// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
+type RestoreDatabaseMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the database being created and restored to.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The type of the restore source.
+ SourceType RestoreSourceType `protobuf:"varint,2,opt,name=source_type,json=sourceType,proto3,enum=google.spanner.admin.database.v1.RestoreSourceType" json:"source_type,omitempty"`
+ // Information about the source used to restore the database, as specified by
+ // `source` in
+ // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest].
+ //
+ // Types that are assignable to SourceInfo:
+ //
+ // *RestoreDatabaseMetadata_BackupInfo
+ SourceInfo isRestoreDatabaseMetadata_SourceInfo `protobuf_oneof:"source_info"`
+ // The progress of the
+ // [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
+ // operation.
+ Progress *OperationProgress `protobuf:"bytes,4,opt,name=progress,proto3" json:"progress,omitempty"`
+ // The time at which cancellation of this operation was received.
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
+ // starts asynchronous cancellation on a long-running operation. The server
+ // makes a best effort to cancel the operation, but success is not guaranteed.
+ // Clients can use
+ // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
+ // other methods to check whether the cancellation succeeded or whether the
+ // operation completed despite cancellation. On successful cancellation,
+ // the operation is not deleted; instead, it becomes an operation with
+ // an [Operation.error][google.longrunning.Operation.error] value with a
+ // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
+ // `Code.CANCELLED`.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+ // If exists, the name of the long-running operation that will be used to
+ // track the post-restore optimization process to optimize the performance of
+ // the restored database, and remove the dependency on the restore source.
+ // The name is of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
+ // where the <database> is the name of database being created and restored to.
+ // The metadata type of the long-running operation is
+ // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
+ // This long-running operation will be automatically created by the system
+ // after the RestoreDatabase long-running operation completes successfully.
+ // This operation will not be created if the restore was not successful.
+ OptimizeDatabaseOperationName string `protobuf:"bytes,6,opt,name=optimize_database_operation_name,json=optimizeDatabaseOperationName,proto3" json:"optimize_database_operation_name,omitempty"`
+}
+
+// Reset restores the message to its zero value. When the protoimpl unsafe
+// fast path is enabled, the cached message-info pointer is stored back so
+// that lazy initialization keeps working on the reset value.
+func (x *RestoreDatabaseMetadata) Reset() {
+	*x = RestoreDatabaseMetadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *RestoreDatabaseMetadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*RestoreDatabaseMetadata) ProtoMessage() {}
+
+// ProtoReflect exposes the message through the protobuf reflection API,
+// lazily populating the per-message info cache on first use when the
+// unsafe fast path is enabled.
+func (x *RestoreDatabaseMetadata) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RestoreDatabaseMetadata.ProtoReflect.Descriptor instead.
+func (*RestoreDatabaseMetadata) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{19}
+}
+
+// GetName returns the Name field, or "" if the receiver is nil.
+func (x *RestoreDatabaseMetadata) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// GetSourceType returns the SourceType field, or TYPE_UNSPECIFIED if the
+// receiver is nil.
+func (x *RestoreDatabaseMetadata) GetSourceType() RestoreSourceType {
+	if x != nil {
+		return x.SourceType
+	}
+	return RestoreSourceType_TYPE_UNSPECIFIED
+}
+
+// GetSourceInfo returns whichever variant of the source_info oneof is set,
+// or nil if none is. (The generator uses receiver name m here rather than
+// the x used by the other accessors; kept as generated.)
+func (m *RestoreDatabaseMetadata) GetSourceInfo() isRestoreDatabaseMetadata_SourceInfo {
+	if m != nil {
+		return m.SourceInfo
+	}
+	return nil
+}
+
+// GetBackupInfo returns the backup_info variant of the source_info oneof,
+// or nil if a different variant (or none) is set.
+func (x *RestoreDatabaseMetadata) GetBackupInfo() *BackupInfo {
+	if x, ok := x.GetSourceInfo().(*RestoreDatabaseMetadata_BackupInfo); ok {
+		return x.BackupInfo
+	}
+	return nil
+}
+
+// GetProgress returns the Progress field, or nil if the receiver is nil.
+func (x *RestoreDatabaseMetadata) GetProgress() *OperationProgress {
+	if x != nil {
+		return x.Progress
+	}
+	return nil
+}
+
+// GetCancelTime returns the CancelTime field, or nil if the receiver is nil.
+func (x *RestoreDatabaseMetadata) GetCancelTime() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CancelTime
+	}
+	return nil
+}
+
+// GetOptimizeDatabaseOperationName returns the
+// OptimizeDatabaseOperationName field, or "" if the receiver is nil.
+func (x *RestoreDatabaseMetadata) GetOptimizeDatabaseOperationName() string {
+	if x != nil {
+		return x.OptimizeDatabaseOperationName
+	}
+	return ""
+}
+
+// isRestoreDatabaseMetadata_SourceInfo is the sealed interface implemented
+// by every variant of the source_info oneof.
+type isRestoreDatabaseMetadata_SourceInfo interface {
+	isRestoreDatabaseMetadata_SourceInfo()
+}
+
+type RestoreDatabaseMetadata_BackupInfo struct {
+	// Information about the backup used to restore the database.
+	BackupInfo *BackupInfo `protobuf:"bytes,3,opt,name=backup_info,json=backupInfo,proto3,oneof"`
+}
+
+func (*RestoreDatabaseMetadata_BackupInfo) isRestoreDatabaseMetadata_SourceInfo() {}
+
+// Metadata type for the long-running operation used to track the progress
+// of optimizations performed on a newly restored database. This long-running
+// operation is automatically created by the system after the successful
+// completion of a database restore, and cannot be cancelled.
+type OptimizeRestoredDatabaseMetadata struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Name of the restored database being optimized.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The progress of the post-restore optimizations.
+	Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
+}
+
+// Reset restores the message to its zero value, re-registering the cached
+// message info when the protoimpl unsafe fast path is enabled.
+func (x *OptimizeRestoredDatabaseMetadata) Reset() {
+	*x = OptimizeRestoredDatabaseMetadata{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *OptimizeRestoredDatabaseMetadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*OptimizeRestoredDatabaseMetadata) ProtoMessage() {}
+
+// ProtoReflect exposes the message through the protobuf reflection API,
+// lazily populating the message-info cache on first use.
+func (x *OptimizeRestoredDatabaseMetadata) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use OptimizeRestoredDatabaseMetadata.ProtoReflect.Descriptor instead.
+func (*OptimizeRestoredDatabaseMetadata) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{20}
+}
+
+// GetName returns the Name field, or "" if the receiver is nil.
+func (x *OptimizeRestoredDatabaseMetadata) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// GetProgress returns the Progress field, or nil if the receiver is nil.
+func (x *OptimizeRestoredDatabaseMetadata) GetProgress() *OperationProgress {
+	if x != nil {
+		return x.Progress
+	}
+	return nil
+}
+
+// A Cloud Spanner database role.
+type DatabaseRole struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The name of the database role. Values are of the form
+	// `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
+	// where `<role>` is as specified in the `CREATE ROLE` DDL statement.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+// Reset restores the message to its zero value, re-registering the cached
+// message info when the protoimpl unsafe fast path is enabled.
+func (x *DatabaseRole) Reset() {
+	*x = DatabaseRole{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *DatabaseRole) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*DatabaseRole) ProtoMessage() {}
+
+// ProtoReflect exposes the message through the protobuf reflection API,
+// lazily populating the message-info cache on first use.
+func (x *DatabaseRole) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[21]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use DatabaseRole.ProtoReflect.Descriptor instead.
+func (*DatabaseRole) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{21}
+}
+
+// GetName returns the Name field, or "" if the receiver is nil.
+func (x *DatabaseRole) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// The request for
+// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+type ListDatabaseRolesRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. The database whose roles should be listed.
+	// Values are of the form
+	// `projects/<project>/instances/<instance>/databases/<database>`.
+	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+	// Number of database roles to be returned in the response. If 0 or less,
+	// defaults to the server's maximum allowed page size.
+	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+	// If non-empty, `page_token` should contain a
+	// [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
+	// from a previous
+	// [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
+	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+// Reset restores the message to its zero value, re-registering the cached
+// message info when the protoimpl unsafe fast path is enabled.
+func (x *ListDatabaseRolesRequest) Reset() {
+	*x = ListDatabaseRolesRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[22]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *ListDatabaseRolesRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*ListDatabaseRolesRequest) ProtoMessage() {}
+
+// ProtoReflect exposes the message through the protobuf reflection API,
+// lazily populating the message-info cache on first use.
+func (x *ListDatabaseRolesRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[22]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListDatabaseRolesRequest.ProtoReflect.Descriptor instead.
+func (*ListDatabaseRolesRequest) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{22}
+}
+
+// GetParent returns the Parent field, or "" if the receiver is nil.
+func (x *ListDatabaseRolesRequest) GetParent() string {
+	if x != nil {
+		return x.Parent
+	}
+	return ""
+}
+
+// GetPageSize returns the PageSize field, or 0 if the receiver is nil.
+func (x *ListDatabaseRolesRequest) GetPageSize() int32 {
+	if x != nil {
+		return x.PageSize
+	}
+	return 0
+}
+
+// GetPageToken returns the PageToken field, or "" if the receiver is nil.
+func (x *ListDatabaseRolesRequest) GetPageToken() string {
+	if x != nil {
+		return x.PageToken
+	}
+	return ""
+}
+
+// The response for
+// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
+type ListDatabaseRolesResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Database roles that matched the request.
+	DatabaseRoles []*DatabaseRole `protobuf:"bytes,1,rep,name=database_roles,json=databaseRoles,proto3" json:"database_roles,omitempty"`
+	// `next_page_token` can be sent in a subsequent
+	// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
+	// call to fetch more of the matching roles.
+	NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+// Reset restores the message to its zero value, re-registering the cached
+// message info when the protoimpl unsafe fast path is enabled.
+func (x *ListDatabaseRolesResponse) Reset() {
+	*x = ListDatabaseRolesResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[23]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *ListDatabaseRolesResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*ListDatabaseRolesResponse) ProtoMessage() {}
+
+// ProtoReflect exposes the message through the protobuf reflection API,
+// lazily populating the message-info cache on first use.
+func (x *ListDatabaseRolesResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[23]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListDatabaseRolesResponse.ProtoReflect.Descriptor instead.
+func (*ListDatabaseRolesResponse) Descriptor() ([]byte, []int) {
+	return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{23}
+}
+
+// GetDatabaseRoles returns the DatabaseRoles field, or nil if the receiver
+// is nil.
+func (x *ListDatabaseRolesResponse) GetDatabaseRoles() []*DatabaseRole {
+	if x != nil {
+		return x.DatabaseRoles
+	}
+	return nil
+}
+
+// GetNextPageToken returns the NextPageToken field, or "" if the receiver
+// is nil.
+func (x *ListDatabaseRolesResponse) GetNextPageToken() string {
+	if x != nil {
+		return x.NextPageToken
+	}
+	return ""
+}
+
+var File_google_spanner_admin_database_v1_spanner_database_admin_proto protoreflect.FileDescriptor
+
+var file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc = []byte{
+ 0x0a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
+ 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
+ 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63,
+ 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x01, 0x0a, 0x0b,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x54, 0x0a, 0x0b, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e,
+ 0x66, 0x6f, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x22, 0x82, 0x08, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x17,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x64, 0x0a,
+ 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x18, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72,
+ 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69,
+ 0x6f, 0x64, 0x12, 0x53, 0x0a, 0x15, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x5f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x13, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f,
+ 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44,
+ 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x72,
+ 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0b,
+ 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28,
+ 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c,
+ 0x69, 0x6e, 0x67, 0x22, 0x4d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11,
+ 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10,
+ 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10,
+ 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47,
+ 0x10, 0x03, 0x3a, 0x62, 0xea, 0x41, 0x5f, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x3c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x7b, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x22, 0x93, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x89, 0x01, 0x0a,
+ 0x15, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb3, 0x03, 0x0a, 0x15, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x73, 0x74, 0x61,
+ 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44,
+ 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x11,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x5a,
+ 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a,
+ 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0x51, 0x0a, 0x12, 0x47, 0x65,
+ 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa6, 0x01,
+ 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d,
+ 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xf9, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x51, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f,
+ 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69,
+ 0x6d, 0x65, 0x22, 0xd9, 0x01, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x43, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x11,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x74,
+ 0x0a, 0x16, 0x44, 0x64, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x41, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x22, 0x8e, 0x03, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x12, 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x21, 0x0a,
+ 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64,
+ 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73,
+ 0x73, 0x12, 0x52, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x64, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x61, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x5a, 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x22, 0x5c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22,
+ 0x65, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64,
+ 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61,
+ 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21,
+ 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x87, 0x01,
+ 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f,
+ 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xbc, 0x02, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x06, 0x62, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52,
+ 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x73, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0x0a, 0x06,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xde, 0x03, 0x0a, 0x1f, 0x52, 0x65, 0x73, 0x74, 0x6f,
+ 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x65, 0x6e,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x6b, 0x6d,
+ 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
+ 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73,
+ 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x6b, 0x6d, 0x73, 0x5f, 0x6b,
+ 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x29,
+ 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65,
+ 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43,
+ 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53,
+ 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x27, 0x55, 0x53,
+ 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54,
+ 0x5f, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59,
+ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c,
+ 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
+ 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
+ 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59,
+ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0xe0, 0x03, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a,
+ 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x6e,
+ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f,
+ 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x6f, 0x70,
+ 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xad, 0x01, 0x0a, 0x20, 0x4f,
+ 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
+ 0x38, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa,
+ 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f,
+ 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
+ 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x23, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x51,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x73, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x6c, 0x65,
+ 0x7d, 0x22, 0x97, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f,
+ 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
+ 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9a, 0x01, 0x0a, 0x19,
+ 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0e, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c,
+ 0x65, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x2a, 0x35, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a,
+ 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x32,
+ 0x98, 0x31, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69,
+ 0x6e, 0x12, 0xc0, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x12, 0xa4, 0x02, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0xb9, 0x01, 0xca, 0x41, 0x64, 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x12, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x3a, 0x01, 0x2a, 0x22, 0x2d, 0x2f, 0x76,
+ 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x0b,
+ 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x34, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
+ 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0x3c, 0xda,
+ 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76,
+ 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xef, 0x01, 0x0a, 0x0e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x37,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0xca, 0x41, 0x22, 0x0a, 0x08, 0x44, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41,
+ 0x14, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x08, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x32, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x9d, 0x02,
+ 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x44, 0x64, 0x6c, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xac,
+ 0x01, 0xca, 0x41, 0x53, 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x13, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x3a, 0x3a, 0x01, 0x2a, 0x32, 0x35, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x64, 0x6c, 0x12, 0xa3, 0x01,
+ 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x35,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x44, 0xda,
+ 0x41, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33,
+ 0x2a, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x12, 0xcd, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64,
+ 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x08, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76,
+ 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x64, 0x64, 0x6c, 0x12, 0xc2, 0x02, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61,
+ 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22,
+ 0xf6, 0x01, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xdd, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x41,
+ 0x3a, 0x01, 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x5a, 0x55, 0x3a, 0x01, 0x2a, 0x22, 0x50, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49,
+ 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49,
+ 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xbb, 0x02, 0x0a, 0x0c, 0x47, 0x65, 0x74,
+ 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x22, 0xef, 0x01, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xdd, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x41, 0x3a, 0x01,
+ 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5a,
+ 0x55, 0x3a, 0x01, 0x2a, 0x22, 0x50, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd4, 0x03, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49,
+ 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65,
+ 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
+ 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0xe8, 0x02, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0xca, 0x02, 0x3a, 0x01, 0x2a, 0x5a, 0x47, 0x3a, 0x01, 0x2a, 0x22, 0x42, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73,
+ 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x5a,
+ 0x5b, 0x3a, 0x01, 0x2a, 0x22, 0x56, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61,
+ 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x5a, 0x59, 0x3a, 0x01,
+ 0x2a, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
+ 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49,
+ 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9f, 0x02,
+ 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x35,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c,
+ 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb8, 0x01, 0xca, 0x41, 0x60, 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x12, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x22, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12,
+ 0xac, 0x02, 0x0a, 0x0a, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x33,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e,
+ 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x22, 0xc9, 0x01, 0xca, 0x41, 0x5e, 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x12, 0x33, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x2a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
+ 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x3a, 0x01, 0x2a, 0x22, 0x30, 0x2f, 0x76,
+ 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x63, 0x6f, 0x70, 0x79, 0x12, 0xa5,
+ 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x32, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc8, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x57, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x32, 0x32, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d,
+ 0x2a, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb8, 0x01,
+ 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x34, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75,
+ 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0xb1, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x38, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc4, 0x01, 0xca, 0x41, 0x65, 0x0a, 0x29, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61,
+ 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65,
+ 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x3a, 0x3a, 0x01, 0x2a, 0x22, 0x35, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xe4, 0x01, 0x0a,
+ 0x16, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44,
+ 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0xda, 0x41, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31,
+ 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x12, 0xdc, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x45, 0xda, 0x41, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x76,
+ 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0xdc, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x4e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x3f, 0x12, 0x3d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65,
+ 0x73, 0x12, 0x8e, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
+ 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
+ 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x84, 0x01, 0xda, 0x41,
+ 0x29, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x52,
+ 0x3a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
+ 0x65, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
+ 0x65, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
+ 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x42,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63,
+ 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x4e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
+ 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12,
+ 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
+ 0x22, 0x86, 0x01, 0xda, 0x41, 0x1b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x62, 0x3a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f,
+ 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x32, 0x4f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x62,
+ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xbd, 0x01, 0x0a, 0x14, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
+ 0x6c, 0x65, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
+ 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
+ 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4e, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x2a, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01, 0x0a, 0x13, 0x4c, 0x69,
+ 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
+ 0x73, 0x12, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
+ 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
+ 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x50,
+ 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12,
+ 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73,
+ 0x1a, 0x78, 0xca, 0x41, 0x16, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x5c, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0xd8, 0x02, 0xea, 0x41, 0x4a,
+ 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f,
+ 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
+ 0x42, 0x19, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f,
+ 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x62,
+ 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02,
+ 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
+ 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescOnce sync.Once
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData = file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc
+)
+
+func file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP() []byte {
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescOnce.Do(func() {
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData)
+ })
+ return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData
+}
+
+var file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 24)
+var file_google_spanner_admin_database_v1_spanner_database_admin_proto_goTypes = []any{
+ (RestoreSourceType)(0), // 0: google.spanner.admin.database.v1.RestoreSourceType
+ (Database_State)(0), // 1: google.spanner.admin.database.v1.Database.State
+ (RestoreDatabaseEncryptionConfig_EncryptionType)(0), // 2: google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType
+ (*RestoreInfo)(nil), // 3: google.spanner.admin.database.v1.RestoreInfo
+ (*Database)(nil), // 4: google.spanner.admin.database.v1.Database
+ (*ListDatabasesRequest)(nil), // 5: google.spanner.admin.database.v1.ListDatabasesRequest
+ (*ListDatabasesResponse)(nil), // 6: google.spanner.admin.database.v1.ListDatabasesResponse
+ (*CreateDatabaseRequest)(nil), // 7: google.spanner.admin.database.v1.CreateDatabaseRequest
+ (*CreateDatabaseMetadata)(nil), // 8: google.spanner.admin.database.v1.CreateDatabaseMetadata
+ (*GetDatabaseRequest)(nil), // 9: google.spanner.admin.database.v1.GetDatabaseRequest
+ (*UpdateDatabaseRequest)(nil), // 10: google.spanner.admin.database.v1.UpdateDatabaseRequest
+ (*UpdateDatabaseMetadata)(nil), // 11: google.spanner.admin.database.v1.UpdateDatabaseMetadata
+ (*UpdateDatabaseDdlRequest)(nil), // 12: google.spanner.admin.database.v1.UpdateDatabaseDdlRequest
+ (*DdlStatementActionInfo)(nil), // 13: google.spanner.admin.database.v1.DdlStatementActionInfo
+ (*UpdateDatabaseDdlMetadata)(nil), // 14: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata
+ (*DropDatabaseRequest)(nil), // 15: google.spanner.admin.database.v1.DropDatabaseRequest
+ (*GetDatabaseDdlRequest)(nil), // 16: google.spanner.admin.database.v1.GetDatabaseDdlRequest
+ (*GetDatabaseDdlResponse)(nil), // 17: google.spanner.admin.database.v1.GetDatabaseDdlResponse
+ (*ListDatabaseOperationsRequest)(nil), // 18: google.spanner.admin.database.v1.ListDatabaseOperationsRequest
+ (*ListDatabaseOperationsResponse)(nil), // 19: google.spanner.admin.database.v1.ListDatabaseOperationsResponse
+ (*RestoreDatabaseRequest)(nil), // 20: google.spanner.admin.database.v1.RestoreDatabaseRequest
+ (*RestoreDatabaseEncryptionConfig)(nil), // 21: google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig
+ (*RestoreDatabaseMetadata)(nil), // 22: google.spanner.admin.database.v1.RestoreDatabaseMetadata
+ (*OptimizeRestoredDatabaseMetadata)(nil), // 23: google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata
+ (*DatabaseRole)(nil), // 24: google.spanner.admin.database.v1.DatabaseRole
+ (*ListDatabaseRolesRequest)(nil), // 25: google.spanner.admin.database.v1.ListDatabaseRolesRequest
+ (*ListDatabaseRolesResponse)(nil), // 26: google.spanner.admin.database.v1.ListDatabaseRolesResponse
+ (*BackupInfo)(nil), // 27: google.spanner.admin.database.v1.BackupInfo
+ (*timestamppb.Timestamp)(nil), // 28: google.protobuf.Timestamp
+ (*EncryptionConfig)(nil), // 29: google.spanner.admin.database.v1.EncryptionConfig
+ (*EncryptionInfo)(nil), // 30: google.spanner.admin.database.v1.EncryptionInfo
+ (DatabaseDialect)(0), // 31: google.spanner.admin.database.v1.DatabaseDialect
+ (*fieldmaskpb.FieldMask)(nil), // 32: google.protobuf.FieldMask
+ (*OperationProgress)(nil), // 33: google.spanner.admin.database.v1.OperationProgress
+ (*longrunningpb.Operation)(nil), // 34: google.longrunning.Operation
+ (*iampb.SetIamPolicyRequest)(nil), // 35: google.iam.v1.SetIamPolicyRequest
+ (*iampb.GetIamPolicyRequest)(nil), // 36: google.iam.v1.GetIamPolicyRequest
+ (*iampb.TestIamPermissionsRequest)(nil), // 37: google.iam.v1.TestIamPermissionsRequest
+ (*CreateBackupRequest)(nil), // 38: google.spanner.admin.database.v1.CreateBackupRequest
+ (*CopyBackupRequest)(nil), // 39: google.spanner.admin.database.v1.CopyBackupRequest
+ (*GetBackupRequest)(nil), // 40: google.spanner.admin.database.v1.GetBackupRequest
+ (*UpdateBackupRequest)(nil), // 41: google.spanner.admin.database.v1.UpdateBackupRequest
+ (*DeleteBackupRequest)(nil), // 42: google.spanner.admin.database.v1.DeleteBackupRequest
+ (*ListBackupsRequest)(nil), // 43: google.spanner.admin.database.v1.ListBackupsRequest
+ (*ListBackupOperationsRequest)(nil), // 44: google.spanner.admin.database.v1.ListBackupOperationsRequest
+ (*CreateBackupScheduleRequest)(nil), // 45: google.spanner.admin.database.v1.CreateBackupScheduleRequest
+ (*GetBackupScheduleRequest)(nil), // 46: google.spanner.admin.database.v1.GetBackupScheduleRequest
+ (*UpdateBackupScheduleRequest)(nil), // 47: google.spanner.admin.database.v1.UpdateBackupScheduleRequest
+ (*DeleteBackupScheduleRequest)(nil), // 48: google.spanner.admin.database.v1.DeleteBackupScheduleRequest
+ (*ListBackupSchedulesRequest)(nil), // 49: google.spanner.admin.database.v1.ListBackupSchedulesRequest
+ (*emptypb.Empty)(nil), // 50: google.protobuf.Empty
+ (*iampb.Policy)(nil), // 51: google.iam.v1.Policy
+ (*iampb.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse
+ (*Backup)(nil), // 53: google.spanner.admin.database.v1.Backup
+ (*ListBackupsResponse)(nil), // 54: google.spanner.admin.database.v1.ListBackupsResponse
+ (*ListBackupOperationsResponse)(nil), // 55: google.spanner.admin.database.v1.ListBackupOperationsResponse
+ (*BackupSchedule)(nil), // 56: google.spanner.admin.database.v1.BackupSchedule
+ (*ListBackupSchedulesResponse)(nil), // 57: google.spanner.admin.database.v1.ListBackupSchedulesResponse
+}
+var file_google_spanner_admin_database_v1_spanner_database_admin_proto_depIdxs = []int32{
+ 0, // 0: google.spanner.admin.database.v1.RestoreInfo.source_type:type_name -> google.spanner.admin.database.v1.RestoreSourceType
+ 27, // 1: google.spanner.admin.database.v1.RestoreInfo.backup_info:type_name -> google.spanner.admin.database.v1.BackupInfo
+ 1, // 2: google.spanner.admin.database.v1.Database.state:type_name -> google.spanner.admin.database.v1.Database.State
+ 28, // 3: google.spanner.admin.database.v1.Database.create_time:type_name -> google.protobuf.Timestamp
+ 3, // 4: google.spanner.admin.database.v1.Database.restore_info:type_name -> google.spanner.admin.database.v1.RestoreInfo
+ 29, // 5: google.spanner.admin.database.v1.Database.encryption_config:type_name -> google.spanner.admin.database.v1.EncryptionConfig
+ 30, // 6: google.spanner.admin.database.v1.Database.encryption_info:type_name -> google.spanner.admin.database.v1.EncryptionInfo
+ 28, // 7: google.spanner.admin.database.v1.Database.earliest_version_time:type_name -> google.protobuf.Timestamp
+ 31, // 8: google.spanner.admin.database.v1.Database.database_dialect:type_name -> google.spanner.admin.database.v1.DatabaseDialect
+ 4, // 9: google.spanner.admin.database.v1.ListDatabasesResponse.databases:type_name -> google.spanner.admin.database.v1.Database
+ 29, // 10: google.spanner.admin.database.v1.CreateDatabaseRequest.encryption_config:type_name -> google.spanner.admin.database.v1.EncryptionConfig
+ 31, // 11: google.spanner.admin.database.v1.CreateDatabaseRequest.database_dialect:type_name -> google.spanner.admin.database.v1.DatabaseDialect
+ 4, // 12: google.spanner.admin.database.v1.UpdateDatabaseRequest.database:type_name -> google.spanner.admin.database.v1.Database
+ 32, // 13: google.spanner.admin.database.v1.UpdateDatabaseRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 10, // 14: google.spanner.admin.database.v1.UpdateDatabaseMetadata.request:type_name -> google.spanner.admin.database.v1.UpdateDatabaseRequest
+ 33, // 15: google.spanner.admin.database.v1.UpdateDatabaseMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
+ 28, // 16: google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time:type_name -> google.protobuf.Timestamp
+ 28, // 17: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.commit_timestamps:type_name -> google.protobuf.Timestamp
+ 33, // 18: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
+ 13, // 19: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.actions:type_name -> google.spanner.admin.database.v1.DdlStatementActionInfo
+ 34, // 20: google.spanner.admin.database.v1.ListDatabaseOperationsResponse.operations:type_name -> google.longrunning.Operation
+ 21, // 21: google.spanner.admin.database.v1.RestoreDatabaseRequest.encryption_config:type_name -> google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig
+ 2, // 22: google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type:type_name -> google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType
+ 0, // 23: google.spanner.admin.database.v1.RestoreDatabaseMetadata.source_type:type_name -> google.spanner.admin.database.v1.RestoreSourceType
+ 27, // 24: google.spanner.admin.database.v1.RestoreDatabaseMetadata.backup_info:type_name -> google.spanner.admin.database.v1.BackupInfo
+ 33, // 25: google.spanner.admin.database.v1.RestoreDatabaseMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
+ 28, // 26: google.spanner.admin.database.v1.RestoreDatabaseMetadata.cancel_time:type_name -> google.protobuf.Timestamp
+ 33, // 27: google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
+ 24, // 28: google.spanner.admin.database.v1.ListDatabaseRolesResponse.database_roles:type_name -> google.spanner.admin.database.v1.DatabaseRole
+ 5, // 29: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases:input_type -> google.spanner.admin.database.v1.ListDatabasesRequest
+ 7, // 30: google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase:input_type -> google.spanner.admin.database.v1.CreateDatabaseRequest
+ 9, // 31: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase:input_type -> google.spanner.admin.database.v1.GetDatabaseRequest
+ 10, // 32: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase:input_type -> google.spanner.admin.database.v1.UpdateDatabaseRequest
+ 12, // 33: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl:input_type -> google.spanner.admin.database.v1.UpdateDatabaseDdlRequest
+ 15, // 34: google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase:input_type -> google.spanner.admin.database.v1.DropDatabaseRequest
+ 16, // 35: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl:input_type -> google.spanner.admin.database.v1.GetDatabaseDdlRequest
+ 35, // 36: google.spanner.admin.database.v1.DatabaseAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
+ 36, // 37: google.spanner.admin.database.v1.DatabaseAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
+ 37, // 38: google.spanner.admin.database.v1.DatabaseAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
+ 38, // 39: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup:input_type -> google.spanner.admin.database.v1.CreateBackupRequest
+ 39, // 40: google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup:input_type -> google.spanner.admin.database.v1.CopyBackupRequest
+ 40, // 41: google.spanner.admin.database.v1.DatabaseAdmin.GetBackup:input_type -> google.spanner.admin.database.v1.GetBackupRequest
+ 41, // 42: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup:input_type -> google.spanner.admin.database.v1.UpdateBackupRequest
+ 42, // 43: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup:input_type -> google.spanner.admin.database.v1.DeleteBackupRequest
+ 43, // 44: google.spanner.admin.database.v1.DatabaseAdmin.ListBackups:input_type -> google.spanner.admin.database.v1.ListBackupsRequest
+ 20, // 45: google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase:input_type -> google.spanner.admin.database.v1.RestoreDatabaseRequest
+ 18, // 46: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations:input_type -> google.spanner.admin.database.v1.ListDatabaseOperationsRequest
+ 44, // 47: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations:input_type -> google.spanner.admin.database.v1.ListBackupOperationsRequest
+ 25, // 48: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles:input_type -> google.spanner.admin.database.v1.ListDatabaseRolesRequest
+ 45, // 49: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule:input_type -> google.spanner.admin.database.v1.CreateBackupScheduleRequest
+ 46, // 50: google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule:input_type -> google.spanner.admin.database.v1.GetBackupScheduleRequest
+ 47, // 51: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule:input_type -> google.spanner.admin.database.v1.UpdateBackupScheduleRequest
+ 48, // 52: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule:input_type -> google.spanner.admin.database.v1.DeleteBackupScheduleRequest
+ 49, // 53: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules:input_type -> google.spanner.admin.database.v1.ListBackupSchedulesRequest
+ 6, // 54: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases:output_type -> google.spanner.admin.database.v1.ListDatabasesResponse
+ 34, // 55: google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase:output_type -> google.longrunning.Operation
+ 4, // 56: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase:output_type -> google.spanner.admin.database.v1.Database
+ 34, // 57: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase:output_type -> google.longrunning.Operation
+ 34, // 58: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl:output_type -> google.longrunning.Operation
+ 50, // 59: google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase:output_type -> google.protobuf.Empty
+ 17, // 60: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl:output_type -> google.spanner.admin.database.v1.GetDatabaseDdlResponse
+ 51, // 61: google.spanner.admin.database.v1.DatabaseAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy
+ 51, // 62: google.spanner.admin.database.v1.DatabaseAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy
+ 52, // 63: google.spanner.admin.database.v1.DatabaseAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
+ 34, // 64: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup:output_type -> google.longrunning.Operation
+ 34, // 65: google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup:output_type -> google.longrunning.Operation
+ 53, // 66: google.spanner.admin.database.v1.DatabaseAdmin.GetBackup:output_type -> google.spanner.admin.database.v1.Backup
+ 53, // 67: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup:output_type -> google.spanner.admin.database.v1.Backup
+ 50, // 68: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup:output_type -> google.protobuf.Empty
+ 54, // 69: google.spanner.admin.database.v1.DatabaseAdmin.ListBackups:output_type -> google.spanner.admin.database.v1.ListBackupsResponse
+ 34, // 70: google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase:output_type -> google.longrunning.Operation
+ 19, // 71: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations:output_type -> google.spanner.admin.database.v1.ListDatabaseOperationsResponse
+ 55, // 72: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations:output_type -> google.spanner.admin.database.v1.ListBackupOperationsResponse
+ 26, // 73: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles:output_type -> google.spanner.admin.database.v1.ListDatabaseRolesResponse
+ 56, // 74: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule:output_type -> google.spanner.admin.database.v1.BackupSchedule
+ 56, // 75: google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule:output_type -> google.spanner.admin.database.v1.BackupSchedule
+ 56, // 76: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule:output_type -> google.spanner.admin.database.v1.BackupSchedule
+ 50, // 77: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule:output_type -> google.protobuf.Empty
+ 57, // 78: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules:output_type -> google.spanner.admin.database.v1.ListBackupSchedulesResponse
+ 54, // [54:79] is the sub-list for method output_type
+ 29, // [29:54] is the sub-list for method input_type
+ 29, // [29:29] is the sub-list for extension type_name
+ 29, // [29:29] is the sub-list for extension extendee
+ 0, // [0:29] is the sub-list for field type_name
+}
+
+func init() { file_google_spanner_admin_database_v1_spanner_database_admin_proto_init() }
+func file_google_spanner_admin_database_v1_spanner_database_admin_proto_init() {
+ if File_google_spanner_admin_database_v1_spanner_database_admin_proto != nil {
+ return
+ }
+ file_google_spanner_admin_database_v1_backup_proto_init()
+ file_google_spanner_admin_database_v1_backup_schedule_proto_init()
+ file_google_spanner_admin_database_v1_common_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ switch v := v.(*RestoreInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[1].Exporter = func(v any, i int) any {
+ switch v := v.(*Database); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[2].Exporter = func(v any, i int) any {
+ switch v := v.(*ListDatabasesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[3].Exporter = func(v any, i int) any {
+ switch v := v.(*ListDatabasesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[4].Exporter = func(v any, i int) any {
+ switch v := v.(*CreateDatabaseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[5].Exporter = func(v any, i int) any {
+ switch v := v.(*CreateDatabaseMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[6].Exporter = func(v any, i int) any {
+ switch v := v.(*GetDatabaseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[7].Exporter = func(v any, i int) any {
+ switch v := v.(*UpdateDatabaseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[8].Exporter = func(v any, i int) any {
+ switch v := v.(*UpdateDatabaseMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[9].Exporter = func(v any, i int) any {
+ switch v := v.(*UpdateDatabaseDdlRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[10].Exporter = func(v any, i int) any {
+ switch v := v.(*DdlStatementActionInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[11].Exporter = func(v any, i int) any {
+ switch v := v.(*UpdateDatabaseDdlMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[12].Exporter = func(v any, i int) any {
+ switch v := v.(*DropDatabaseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[13].Exporter = func(v any, i int) any {
+ switch v := v.(*GetDatabaseDdlRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[14].Exporter = func(v any, i int) any {
+ switch v := v.(*GetDatabaseDdlResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[15].Exporter = func(v any, i int) any {
+ switch v := v.(*ListDatabaseOperationsRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[16].Exporter = func(v any, i int) any {
+ switch v := v.(*ListDatabaseOperationsResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17].Exporter = func(v any, i int) any {
+ switch v := v.(*RestoreDatabaseRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[18].Exporter = func(v any, i int) any {
+ switch v := v.(*RestoreDatabaseEncryptionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19].Exporter = func(v any, i int) any {
+ switch v := v.(*RestoreDatabaseMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[20].Exporter = func(v any, i int) any {
+ switch v := v.(*OptimizeRestoredDatabaseMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[21].Exporter = func(v any, i int) any {
+ switch v := v.(*DatabaseRole); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[22].Exporter = func(v any, i int) any {
+ switch v := v.(*ListDatabaseRolesRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[23].Exporter = func(v any, i int) any {
+ switch v := v.(*ListDatabaseRolesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0].OneofWrappers = []any{
+ (*RestoreInfo_BackupInfo)(nil),
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17].OneofWrappers = []any{
+ (*RestoreDatabaseRequest_Backup)(nil),
+ }
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19].OneofWrappers = []any{
+ (*RestoreDatabaseMetadata_BackupInfo)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 24,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_spanner_admin_database_v1_spanner_database_admin_proto_goTypes,
+ DependencyIndexes: file_google_spanner_admin_database_v1_spanner_database_admin_proto_depIdxs,
+ EnumInfos: file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes,
+ MessageInfos: file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes,
+ }.Build()
+ File_google_spanner_admin_database_v1_spanner_database_admin_proto = out.File
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc = nil
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_goTypes = nil
+ file_google_spanner_admin_database_v1_spanner_database_admin_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// DatabaseAdminClient is the client API for DatabaseAdmin service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type DatabaseAdminClient interface {
+ // Lists Cloud Spanner databases.
+ ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error)
+ // Creates a new Cloud Spanner database and starts to prepare it for serving.
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<database_name>/operations/<operation_id>` and
+ // can be used to track preparation of the database. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Gets the state of a Cloud Spanner database.
+ GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error)
+ // Updates a Cloud Spanner database. The returned
+ // [long-running operation][google.longrunning.Operation] can be used to track
+ // the progress of updating the database. If the named database does not
+ // exist, returns `NOT_FOUND`.
+ //
+ // While the operation is pending:
+ //
+ // - The database's
+ // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ // field is set to true.
+ // - Cancelling the operation is best-effort. If the cancellation succeeds,
+ // the operation metadata's
+ // [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ // is set, the updates are reverted, and the operation terminates with a
+ // `CANCELLED` status.
+ // - New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
+ // until the pending operation is done (returns successfully or with
+ // error).
+ // - Reading the database via the API continues to give the pre-request
+ // values.
+ //
+ // Upon completion of the returned operation:
+ //
+ // - The new values are in effect and readable via the API.
+ // - The database's
+ // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ // field becomes false.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
+ // and can be used to track the database modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ UpdateDatabase(ctx context.Context, in *UpdateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Updates the schema of a Cloud Spanner database by
+ // creating/altering/dropping tables, columns, indexes, etc. The returned
+ // [long-running operation][google.longrunning.Operation] will have a name of
+ // the format `<database_name>/operations/<operation_id>` and can be used to
+ // track execution of the schema change(s). The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ // The operation has no response.
+ UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Drops (aka deletes) a Cloud Spanner database.
+ // Completed backups for the database will be retained according to their
+ // `expire_time`.
+ // Note: Cloud Spanner might continue to accept requests for a few seconds
+ // after the database has been deleted.
+ DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Returns the schema of a Cloud Spanner database as a list of formatted
+ // DDL statements. This method does not show pending schema updates, those may
+ // be queried using the [Operations][google.longrunning.Operations] API.
+ GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error)
+ // Sets the access control policy on a database or backup resource.
+ // Replaces any existing policy.
+ //
+ // Authorization requires `spanner.databases.setIamPolicy`
+ // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ // For backups, authorization requires `spanner.backups.setIamPolicy`
+ // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
+ // Gets the access control policy for a database or backup resource.
+ // Returns an empty policy if a database or backup exists but does not have a
+ // policy set.
+ //
+ // Authorization requires `spanner.databases.getIamPolicy` permission on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ // For backups, authorization requires `spanner.backups.getIamPolicy`
+ // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
+ // Returns permissions that the caller has on the specified database or backup
+ // resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner database will
+ // result in a NOT_FOUND error if the user has
+ // `spanner.databases.list` permission on the containing Cloud
+ // Spanner instance. Otherwise returns an empty set of permissions.
+ // Calling this method on a backup that does not exist will
+ // result in a NOT_FOUND error if the user has
+ // `spanner.backups.list` permission on the containing instance.
+ TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ // Starts creating a new Cloud Spanner Backup.
+ // The returned backup [long-running operation][google.longrunning.Operation]
+ // will have a name of the format
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ // and can be used to track creation of the backup. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the creation and delete the
+ // backup. There can be only one pending backup creation per database. Backup
+ // creation of different databases can run concurrently.
+ CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Starts copying a Cloud Spanner Backup.
+ // The returned backup [long-running operation][google.longrunning.Operation]
+ // will have a name of the format
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ // and can be used to track copying of the backup. The operation is associated
+ // with the destination backup.
+ // The [metadata][google.longrunning.Operation.metadata] field type is
+ // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the copying and delete the
+ // destination backup. Concurrent CopyBackup requests can run on the same
+ // source backup.
+ CopyBackup(ctx context.Context, in *CopyBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Gets metadata on a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error)
+ // Updates a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ UpdateBackup(ctx context.Context, in *UpdateBackupRequest, opts ...grpc.CallOption) (*Backup, error)
+ // Deletes a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Lists completed and pending backups.
+ // Backups returned are ordered by `create_time` in descending order,
+ // starting from the most recent `create_time`.
+ ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error)
+ // Create a new database by restoring from a completed backup. The new
+ // database must be in the same project and in an instance with the same
+ // instance configuration as the instance containing
+ // the backup. The returned database [long-running
+ // operation][google.longrunning.Operation] has a name of the format
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
+ // and can be used to track the progress of the operation, and to cancel it.
+ // The [metadata][google.longrunning.Operation.metadata] field type is
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] type
+ // is [Database][google.spanner.admin.database.v1.Database], if
+ // successful. Cancelling the returned operation will stop the restore and
+ // delete the database.
+ // There can be only one database being restored into an instance at a time.
+ // Once the restore operation completes, a new restore operation can be
+ // initiated, without waiting for the optimize operation associated with the
+ // first restore to complete.
+ RestoreDatabase(ctx context.Context, in *RestoreDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
+ // Lists database [longrunning-operations][google.longrunning.Operation].
+ // A database operation has a name of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations.
+ ListDatabaseOperations(ctx context.Context, in *ListDatabaseOperationsRequest, opts ...grpc.CallOption) (*ListDatabaseOperationsResponse, error)
+ // Lists the backup [long-running operations][google.longrunning.Operation] in
+ // the given instance. A backup operation has a name of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.progress.start_time` in descending order starting
+ // from the most recently started operation.
+ ListBackupOperations(ctx context.Context, in *ListBackupOperationsRequest, opts ...grpc.CallOption) (*ListBackupOperationsResponse, error)
+ // Lists Cloud Spanner database roles.
+ ListDatabaseRoles(ctx context.Context, in *ListDatabaseRolesRequest, opts ...grpc.CallOption) (*ListDatabaseRolesResponse, error)
+ // Creates a new backup schedule.
+ CreateBackupSchedule(ctx context.Context, in *CreateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error)
+ // Gets backup schedule for the input schedule name.
+ GetBackupSchedule(ctx context.Context, in *GetBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error)
+ // Updates a backup schedule.
+ UpdateBackupSchedule(ctx context.Context, in *UpdateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error)
+ // Deletes a backup schedule.
+ DeleteBackupSchedule(ctx context.Context, in *DeleteBackupScheduleRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Lists all the backup schedules for the database.
+ ListBackupSchedules(ctx context.Context, in *ListBackupSchedulesRequest, opts ...grpc.CallOption) (*ListBackupSchedulesResponse, error)
+}
+
+type databaseAdminClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewDatabaseAdminClient(cc grpc.ClientConnInterface) DatabaseAdminClient {
+ return &databaseAdminClient{cc}
+}
+
+func (c *databaseAdminClient) ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) {
+ out := new(ListDatabasesResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) {
+ out := new(Database)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) UpdateDatabase(ctx context.Context, in *UpdateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error) {
+ out := new(GetDatabaseDdlResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ out := new(iampb.TestIamPermissionsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) CopyBackup(ctx context.Context, in *CopyBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) {
+ out := new(Backup)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) UpdateBackup(ctx context.Context, in *UpdateBackupRequest, opts ...grpc.CallOption) (*Backup, error) {
+ out := new(Backup)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) {
+ out := new(ListBackupsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) RestoreDatabase(ctx context.Context, in *RestoreDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) ListDatabaseOperations(ctx context.Context, in *ListDatabaseOperationsRequest, opts ...grpc.CallOption) (*ListDatabaseOperationsResponse, error) {
+ out := new(ListDatabaseOperationsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) ListBackupOperations(ctx context.Context, in *ListBackupOperationsRequest, opts ...grpc.CallOption) (*ListBackupOperationsResponse, error) {
+ out := new(ListBackupOperationsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) ListDatabaseRoles(ctx context.Context, in *ListDatabaseRolesRequest, opts ...grpc.CallOption) (*ListDatabaseRolesResponse, error) {
+ out := new(ListDatabaseRolesResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) CreateBackupSchedule(ctx context.Context, in *CreateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error) {
+ out := new(BackupSchedule)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) GetBackupSchedule(ctx context.Context, in *GetBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error) {
+ out := new(BackupSchedule)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) UpdateBackupSchedule(ctx context.Context, in *UpdateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error) {
+ out := new(BackupSchedule)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) DeleteBackupSchedule(ctx context.Context, in *DeleteBackupScheduleRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *databaseAdminClient) ListBackupSchedules(ctx context.Context, in *ListBackupSchedulesRequest, opts ...grpc.CallOption) (*ListBackupSchedulesResponse, error) {
+ out := new(ListBackupSchedulesResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// DatabaseAdminServer is the server API for DatabaseAdmin service.
+type DatabaseAdminServer interface {
+ // Lists Cloud Spanner databases.
+ ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error)
+ // Creates a new Cloud Spanner database and starts to prepare it for serving.
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<database_name>/operations/<operation_id>` and
+ // can be used to track preparation of the database. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ CreateDatabase(context.Context, *CreateDatabaseRequest) (*longrunningpb.Operation, error)
+ // Gets the state of a Cloud Spanner database.
+ GetDatabase(context.Context, *GetDatabaseRequest) (*Database, error)
+ // Updates a Cloud Spanner database. The returned
+ // [long-running operation][google.longrunning.Operation] can be used to track
+ // the progress of updating the database. If the named database does not
+ // exist, returns `NOT_FOUND`.
+ //
+ // While the operation is pending:
+ //
+ // - The database's
+ // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ // field is set to true.
+ // - Cancelling the operation is best-effort. If the cancellation succeeds,
+ // the operation metadata's
+ // [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
+ // is set, the updates are reverted, and the operation terminates with a
+ // `CANCELLED` status.
+ // - New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
+ // until the pending operation is done (returns successfully or with
+ // error).
+ // - Reading the database via the API continues to give the pre-request
+ // values.
+ //
+ // Upon completion of the returned operation:
+ //
+ // - The new values are in effect and readable via the API.
+ // - The database's
+ // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
+ // field becomes false.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
+ // and can be used to track the database modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Database][google.spanner.admin.database.v1.Database], if successful.
+ UpdateDatabase(context.Context, *UpdateDatabaseRequest) (*longrunningpb.Operation, error)
+ // Updates the schema of a Cloud Spanner database by
+ // creating/altering/dropping tables, columns, indexes, etc. The returned
+ // [long-running operation][google.longrunning.Operation] will have a name of
+ // the format `<database_name>/operations/<operation_id>` and can be used to
+ // track execution of the schema change(s). The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
+ // The operation has no response.
+ UpdateDatabaseDdl(context.Context, *UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error)
+ // Drops (aka deletes) a Cloud Spanner database.
+ // Completed backups for the database will be retained according to their
+ // `expire_time`.
+ // Note: Cloud Spanner might continue to accept requests for a few seconds
+ // after the database has been deleted.
+ DropDatabase(context.Context, *DropDatabaseRequest) (*emptypb.Empty, error)
+ // Returns the schema of a Cloud Spanner database as a list of formatted
+ // DDL statements. This method does not show pending schema updates, those may
+ // be queried using the [Operations][google.longrunning.Operations] API.
+ GetDatabaseDdl(context.Context, *GetDatabaseDdlRequest) (*GetDatabaseDdlResponse, error)
+ // Sets the access control policy on a database or backup resource.
+ // Replaces any existing policy.
+ //
+ // Authorization requires `spanner.databases.setIamPolicy`
+ // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ // For backups, authorization requires `spanner.backups.setIamPolicy`
+ // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
+ // Gets the access control policy for a database or backup resource.
+ // Returns an empty policy if a database or backup exists but does not have a
+ // policy set.
+ //
+ // Authorization requires `spanner.databases.getIamPolicy` permission on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ // For backups, authorization requires `spanner.backups.getIamPolicy`
+ // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error)
+ // Returns permissions that the caller has on the specified database or backup
+ // resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner database will
+ // result in a NOT_FOUND error if the user has
+ // `spanner.databases.list` permission on the containing Cloud
+ // Spanner instance. Otherwise returns an empty set of permissions.
+ // Calling this method on a backup that does not exist will
+ // result in a NOT_FOUND error if the user has
+ // `spanner.backups.list` permission on the containing instance.
+ TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
+ // Starts creating a new Cloud Spanner Backup.
+ // The returned backup [long-running operation][google.longrunning.Operation]
+ // will have a name of the format
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ // and can be used to track creation of the backup. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the creation and delete the
+ // backup. There can be only one pending backup creation per database. Backup
+ // creation of different databases can run concurrently.
+ CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error)
+ // Starts copying a Cloud Spanner Backup.
+ // The returned backup [long-running operation][google.longrunning.Operation]
+ // will have a name of the format
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
+ // and can be used to track copying of the backup. The operation is associated
+ // with the destination backup.
+ // The [metadata][google.longrunning.Operation.metadata] field type is
+ // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Backup][google.spanner.admin.database.v1.Backup], if successful.
+ // Cancelling the returned operation will stop the copying and delete the
+ // destination backup. Concurrent CopyBackup requests can run on the same
+ // source backup.
+ CopyBackup(context.Context, *CopyBackupRequest) (*longrunningpb.Operation, error)
+ // Gets metadata on a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ GetBackup(context.Context, *GetBackupRequest) (*Backup, error)
+ // Updates a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ UpdateBackup(context.Context, *UpdateBackupRequest) (*Backup, error)
+ // Deletes a pending or completed
+ // [Backup][google.spanner.admin.database.v1.Backup].
+ DeleteBackup(context.Context, *DeleteBackupRequest) (*emptypb.Empty, error)
+ // Lists completed and pending backups.
+ // Backups returned are ordered by `create_time` in descending order,
+ // starting from the most recent `create_time`.
+ ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error)
+ // Create a new database by restoring from a completed backup. The new
+ // database must be in the same project and in an instance with the same
+ // instance configuration as the instance containing
+ // the backup. The returned database [long-running
+ // operation][google.longrunning.Operation] has a name of the format
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
+ // and can be used to track the progress of the operation, and to cancel it.
+ // The [metadata][google.longrunning.Operation.metadata] field type is
+ // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
+ // The [response][google.longrunning.Operation.response] type
+ // is [Database][google.spanner.admin.database.v1.Database], if
+ // successful. Cancelling the returned operation will stop the restore and
+ // delete the database.
+ // There can be only one database being restored into an instance at a time.
+ // Once the restore operation completes, a new restore operation can be
+ // initiated, without waiting for the optimize operation associated with the
+ // first restore to complete.
+ RestoreDatabase(context.Context, *RestoreDatabaseRequest) (*longrunningpb.Operation, error)
+ // Lists database [longrunning-operations][google.longrunning.Operation].
+ // A database operation has a name of the form
+ // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations.
+ ListDatabaseOperations(context.Context, *ListDatabaseOperationsRequest) (*ListDatabaseOperationsResponse, error)
+ // Lists the backup [long-running operations][google.longrunning.Operation] in
+ // the given instance. A backup operation has a name of the form
+ // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.progress.start_time` in descending order starting
+ // from the most recently started operation.
+ ListBackupOperations(context.Context, *ListBackupOperationsRequest) (*ListBackupOperationsResponse, error)
+ // Lists Cloud Spanner database roles.
+ ListDatabaseRoles(context.Context, *ListDatabaseRolesRequest) (*ListDatabaseRolesResponse, error)
+ // Creates a new backup schedule.
+ CreateBackupSchedule(context.Context, *CreateBackupScheduleRequest) (*BackupSchedule, error)
+ // Gets backup schedule for the input schedule name.
+ GetBackupSchedule(context.Context, *GetBackupScheduleRequest) (*BackupSchedule, error)
+ // Updates a backup schedule.
+ UpdateBackupSchedule(context.Context, *UpdateBackupScheduleRequest) (*BackupSchedule, error)
+ // Deletes a backup schedule.
+ DeleteBackupSchedule(context.Context, *DeleteBackupScheduleRequest) (*emptypb.Empty, error)
+ // Lists all the backup schedules for the database.
+ ListBackupSchedules(context.Context, *ListBackupSchedulesRequest) (*ListBackupSchedulesResponse, error)
+}
+
+// UnimplementedDatabaseAdminServer can be embedded to have forward compatible implementations.
+type UnimplementedDatabaseAdminServer struct {
+}
+
+func (*UnimplementedDatabaseAdminServer) ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListDatabases not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) CreateDatabase(context.Context, *CreateDatabaseRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateDatabase not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) GetDatabase(context.Context, *GetDatabaseRequest) (*Database, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetDatabase not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) UpdateDatabase(context.Context, *UpdateDatabaseRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateDatabase not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) UpdateDatabaseDdl(context.Context, *UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateDatabaseDdl not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) DropDatabase(context.Context, *DropDatabaseRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DropDatabase not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) GetDatabaseDdl(context.Context, *GetDatabaseDdlRequest) (*GetDatabaseDdlResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetDatabaseDdl not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) CopyBackup(context.Context, *CopyBackupRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CopyBackup not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) GetBackup(context.Context, *GetBackupRequest) (*Backup, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBackup not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) UpdateBackup(context.Context, *UpdateBackupRequest) (*Backup, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateBackup not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListBackups not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) RestoreDatabase(context.Context, *RestoreDatabaseRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RestoreDatabase not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) ListDatabaseOperations(context.Context, *ListDatabaseOperationsRequest) (*ListDatabaseOperationsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListDatabaseOperations not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) ListBackupOperations(context.Context, *ListBackupOperationsRequest) (*ListBackupOperationsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListBackupOperations not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) ListDatabaseRoles(context.Context, *ListDatabaseRolesRequest) (*ListDatabaseRolesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListDatabaseRoles not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) CreateBackupSchedule(context.Context, *CreateBackupScheduleRequest) (*BackupSchedule, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateBackupSchedule not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) GetBackupSchedule(context.Context, *GetBackupScheduleRequest) (*BackupSchedule, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetBackupSchedule not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) UpdateBackupSchedule(context.Context, *UpdateBackupScheduleRequest) (*BackupSchedule, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateBackupSchedule not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) DeleteBackupSchedule(context.Context, *DeleteBackupScheduleRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteBackupSchedule not implemented")
+}
+func (*UnimplementedDatabaseAdminServer) ListBackupSchedules(context.Context, *ListBackupSchedulesRequest) (*ListBackupSchedulesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListBackupSchedules not implemented")
+}
+
+func RegisterDatabaseAdminServer(s *grpc.Server, srv DatabaseAdminServer) {
+ s.RegisterService(&_DatabaseAdmin_serviceDesc, srv)
+}
+
+func _DatabaseAdmin_ListDatabases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListDatabasesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).ListDatabases(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).ListDatabases(ctx, req.(*ListDatabasesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_CreateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateDatabaseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).CreateDatabase(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).CreateDatabase(ctx, req.(*CreateDatabaseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_GetDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetDatabaseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).GetDatabase(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).GetDatabase(ctx, req.(*GetDatabaseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_UpdateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateDatabaseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).UpdateDatabase(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).UpdateDatabase(ctx, req.(*UpdateDatabaseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_UpdateDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateDatabaseDdlRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, req.(*UpdateDatabaseDdlRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_DropDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DropDatabaseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).DropDatabase(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).DropDatabase(ctx, req.(*DropDatabaseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_GetDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetDatabaseDdlRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, req.(*GetDatabaseDdlRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(iampb.SetIamPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).SetIamPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(iampb.GetIamPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).GetIamPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(iampb.TestIamPermissionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).TestIamPermissions(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_CreateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateBackupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).CreateBackup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).CreateBackup(ctx, req.(*CreateBackupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_CopyBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CopyBackupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).CopyBackup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).CopyBackup(ctx, req.(*CopyBackupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_GetBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetBackupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).GetBackup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).GetBackup(ctx, req.(*GetBackupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_UpdateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateBackupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).UpdateBackup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).UpdateBackup(ctx, req.(*UpdateBackupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_DeleteBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteBackupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).DeleteBackup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).DeleteBackup(ctx, req.(*DeleteBackupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListBackupsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).ListBackups(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).ListBackups(ctx, req.(*ListBackupsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_RestoreDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RestoreDatabaseRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).RestoreDatabase(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).RestoreDatabase(ctx, req.(*RestoreDatabaseRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_ListDatabaseOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListDatabaseOperationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).ListDatabaseOperations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).ListDatabaseOperations(ctx, req.(*ListDatabaseOperationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_ListBackupOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListBackupOperationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).ListBackupOperations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).ListBackupOperations(ctx, req.(*ListBackupOperationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_ListDatabaseRoles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListDatabaseRolesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).ListDatabaseRoles(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).ListDatabaseRoles(ctx, req.(*ListDatabaseRolesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_CreateBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateBackupScheduleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).CreateBackupSchedule(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).CreateBackupSchedule(ctx, req.(*CreateBackupScheduleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_GetBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetBackupScheduleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).GetBackupSchedule(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).GetBackupSchedule(ctx, req.(*GetBackupScheduleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_UpdateBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateBackupScheduleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).UpdateBackupSchedule(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).UpdateBackupSchedule(ctx, req.(*UpdateBackupScheduleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_DeleteBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteBackupScheduleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).DeleteBackupSchedule(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).DeleteBackupSchedule(ctx, req.(*DeleteBackupScheduleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _DatabaseAdmin_ListBackupSchedules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListBackupSchedulesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(DatabaseAdminServer).ListBackupSchedules(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(DatabaseAdminServer).ListBackupSchedules(ctx, req.(*ListBackupSchedulesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _DatabaseAdmin_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.spanner.admin.database.v1.DatabaseAdmin",
+ HandlerType: (*DatabaseAdminServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListDatabases",
+ Handler: _DatabaseAdmin_ListDatabases_Handler,
+ },
+ {
+ MethodName: "CreateDatabase",
+ Handler: _DatabaseAdmin_CreateDatabase_Handler,
+ },
+ {
+ MethodName: "GetDatabase",
+ Handler: _DatabaseAdmin_GetDatabase_Handler,
+ },
+ {
+ MethodName: "UpdateDatabase",
+ Handler: _DatabaseAdmin_UpdateDatabase_Handler,
+ },
+ {
+ MethodName: "UpdateDatabaseDdl",
+ Handler: _DatabaseAdmin_UpdateDatabaseDdl_Handler,
+ },
+ {
+ MethodName: "DropDatabase",
+ Handler: _DatabaseAdmin_DropDatabase_Handler,
+ },
+ {
+ MethodName: "GetDatabaseDdl",
+ Handler: _DatabaseAdmin_GetDatabaseDdl_Handler,
+ },
+ {
+ MethodName: "SetIamPolicy",
+ Handler: _DatabaseAdmin_SetIamPolicy_Handler,
+ },
+ {
+ MethodName: "GetIamPolicy",
+ Handler: _DatabaseAdmin_GetIamPolicy_Handler,
+ },
+ {
+ MethodName: "TestIamPermissions",
+ Handler: _DatabaseAdmin_TestIamPermissions_Handler,
+ },
+ {
+ MethodName: "CreateBackup",
+ Handler: _DatabaseAdmin_CreateBackup_Handler,
+ },
+ {
+ MethodName: "CopyBackup",
+ Handler: _DatabaseAdmin_CopyBackup_Handler,
+ },
+ {
+ MethodName: "GetBackup",
+ Handler: _DatabaseAdmin_GetBackup_Handler,
+ },
+ {
+ MethodName: "UpdateBackup",
+ Handler: _DatabaseAdmin_UpdateBackup_Handler,
+ },
+ {
+ MethodName: "DeleteBackup",
+ Handler: _DatabaseAdmin_DeleteBackup_Handler,
+ },
+ {
+ MethodName: "ListBackups",
+ Handler: _DatabaseAdmin_ListBackups_Handler,
+ },
+ {
+ MethodName: "RestoreDatabase",
+ Handler: _DatabaseAdmin_RestoreDatabase_Handler,
+ },
+ {
+ MethodName: "ListDatabaseOperations",
+ Handler: _DatabaseAdmin_ListDatabaseOperations_Handler,
+ },
+ {
+ MethodName: "ListBackupOperations",
+ Handler: _DatabaseAdmin_ListBackupOperations_Handler,
+ },
+ {
+ MethodName: "ListDatabaseRoles",
+ Handler: _DatabaseAdmin_ListDatabaseRoles_Handler,
+ },
+ {
+ MethodName: "CreateBackupSchedule",
+ Handler: _DatabaseAdmin_CreateBackupSchedule_Handler,
+ },
+ {
+ MethodName: "GetBackupSchedule",
+ Handler: _DatabaseAdmin_GetBackupSchedule_Handler,
+ },
+ {
+ MethodName: "UpdateBackupSchedule",
+ Handler: _DatabaseAdmin_UpdateBackupSchedule_Handler,
+ },
+ {
+ MethodName: "DeleteBackupSchedule",
+ Handler: _DatabaseAdmin_DeleteBackupSchedule_Handler,
+ },
+ {
+ MethodName: "ListBackupSchedules",
+ Handler: _DatabaseAdmin_ListBackupSchedules_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/spanner/admin/database/v1/spanner_database_admin.proto",
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go
new file mode 100644
index 000000000..52f0ffb6b
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go
@@ -0,0 +1,128 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+// Package database is an auto-generated package for the
+// Cloud Spanner API.
+//
+// Cloud Spanner is a managed, mission-critical, globally consistent and
+// scalable relational database service.
+//
+// # General documentation
+//
+// For information that is relevant for all client libraries please reference
+// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
+// page includes:
+//
+// - [Authentication and Authorization]
+// - [Timeouts and Cancellation]
+// - [Testing against Client Libraries]
+// - [Debugging Client Libraries]
+// - [Inspecting errors]
+//
+// # Example usage
+//
+// To get started with this package, create a client.
+//
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := database.NewDatabaseAdminClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// The client will use your default application credentials. Clients should be reused instead of created as needed.
+// The methods of Client are safe for concurrent use by multiple goroutines.
+// The returned client must be Closed when it is done being used.
+//
+// # Using the Client
+//
+// The following is an example of making an API call with the newly created client.
+//
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := database.NewDatabaseAdminClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// req := &databasepb.CopyBackupRequest{
+// // TODO: Fill request struct fields.
+// // See https://pkg.go.dev/cloud.google.com/go/spanner/admin/database/apiv1/databasepb#CopyBackupRequest.
+// }
+// op, err := c.CopyBackup(ctx, req)
+// if err != nil {
+// // TODO: Handle error.
+// }
+//
+// resp, err := op.Wait(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// // TODO: Use resp.
+// _ = resp
+//
+// # Use of Context
+//
+// The ctx passed to NewDatabaseAdminClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
+// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
+// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
+// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
+// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
+package database // import "cloud.google.com/go/spanner/admin/database/apiv1"
+
+import (
+ "context"
+
+ "google.golang.org/api/option"
+)
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/spanner.admin",
+ }
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go
new file mode 100644
index 000000000..60c190565
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package database
+
+import (
+ "context"
+ "os"
+
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+func init() {
+ newDatabaseAdminClientHook = func(ctx context.Context, p clientHookParams) ([]option.ClientOption, error) {
+ if emulator := os.Getenv("SPANNER_EMULATOR_HOST"); emulator != "" {
+ return []option.ClientOption{
+ option.WithEndpoint(emulator),
+ option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
+ option.WithoutAuthentication(),
+ }, nil
+ }
+
+ return nil, nil
+ }
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go
new file mode 100644
index 000000000..d474b2cce
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+// DatabaseAdminInstancePath returns the path for the instance resource.
+//
+// Deprecated: Use
+//
+// fmt.Sprintf("projects/%s/instances/%s", project, instance)
+//
+// instead.
+func DatabaseAdminInstancePath(project, instance string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/instances/" +
+ instance +
+ ""
+}
+
+// DatabaseAdminDatabasePath returns the path for the database resource.
+//
+// Deprecated: Use
+//
+// fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, instance, database)
+//
+// instead.
+func DatabaseAdminDatabasePath(project, instance, database string) string {
+ return "" +
+ "projects/" +
+ project +
+ "/instances/" +
+ instance +
+ "/databases/" +
+ database +
+ ""
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go
new file mode 100644
index 000000000..b0ba71de8
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go
@@ -0,0 +1,23 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapicgen. DO NOT EDIT.
+
+package database
+
+import "cloud.google.com/go/spanner/internal"
+
+func init() {
+ versionClient = internal.Version
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go
new file mode 100644
index 000000000..f6bab7fe1
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go
@@ -0,0 +1,664 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package instance
+
+import (
+ "context"
+ "time"
+
+ "cloud.google.com/go/longrunning"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ instancepb "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+)
+
+// CreateInstanceConfigOperation manages a long-running operation from CreateInstanceConfig.
+type CreateInstanceConfigOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateInstanceConfigOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstanceConfig
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateInstanceConfigOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstanceConfig
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateInstanceConfigOperation) Metadata() (*instancepb.CreateInstanceConfigMetadata, error) {
+ var meta instancepb.CreateInstanceConfigMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateInstanceConfigOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateInstanceConfigOperation) Name() string {
+ return op.lro.Name()
+}
+
+// CreateInstanceOperation manages a long-running operation from CreateInstance.
+type CreateInstanceOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.Instance
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.Instance
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateInstanceOperation) Metadata() (*instancepb.CreateInstanceMetadata, error) {
+ var meta instancepb.CreateInstanceMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateInstanceOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateInstanceOperation) Name() string {
+ return op.lro.Name()
+}
+
+// CreateInstancePartitionOperation manages a long-running operation from CreateInstancePartition.
+type CreateInstancePartitionOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *CreateInstancePartitionOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstancePartition
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *CreateInstancePartitionOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstancePartition
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *CreateInstancePartitionOperation) Metadata() (*instancepb.CreateInstancePartitionMetadata, error) {
+ var meta instancepb.CreateInstancePartitionMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *CreateInstancePartitionOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *CreateInstancePartitionOperation) Name() string {
+ return op.lro.Name()
+}
+
+// MoveInstanceOperation manages a long-running operation from MoveInstance.
+type MoveInstanceOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *MoveInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.MoveInstanceResponse, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.MoveInstanceResponse
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *MoveInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.MoveInstanceResponse, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.MoveInstanceResponse
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *MoveInstanceOperation) Metadata() (*instancepb.MoveInstanceMetadata, error) {
+ var meta instancepb.MoveInstanceMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *MoveInstanceOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *MoveInstanceOperation) Name() string {
+ return op.lro.Name()
+}
+
+// UpdateInstanceConfigOperation manages a long-running operation from UpdateInstanceConfig.
+type UpdateInstanceConfigOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *UpdateInstanceConfigOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstanceConfig
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *UpdateInstanceConfigOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstanceConfig
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *UpdateInstanceConfigOperation) Metadata() (*instancepb.UpdateInstanceConfigMetadata, error) {
+ var meta instancepb.UpdateInstanceConfigMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *UpdateInstanceConfigOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *UpdateInstanceConfigOperation) Name() string {
+ return op.lro.Name()
+}
+
+// UpdateInstanceOperation manages a long-running operation from UpdateInstance.
+type UpdateInstanceOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.Instance
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *UpdateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.Instance
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *UpdateInstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) {
+ var meta instancepb.UpdateInstanceMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *UpdateInstanceOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *UpdateInstanceOperation) Name() string {
+ return op.lro.Name()
+}
+
+// UpdateInstancePartitionOperation manages a long-running operation from UpdateInstancePartition.
+type UpdateInstancePartitionOperation struct {
+ lro *longrunning.Operation
+ pollPath string
+}
+
+// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
+//
+// See documentation of Poll for error-handling information.
+func (op *UpdateInstancePartitionOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstancePartition
+ if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
+
+// Poll fetches the latest state of the long-running operation.
+//
+// Poll also fetches the latest metadata, which can be retrieved by Metadata.
+//
+// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
+// the operation has completed with failure, the error is returned and op.Done will return true.
+// If Poll succeeds and the operation has completed successfully,
+// op.Done will return true, and the response of the operation is returned.
+// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
+func (op *UpdateInstancePartitionOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
+ opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
+ var resp instancepb.InstancePartition
+ if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
+ return nil, err
+ }
+ if !op.Done() {
+ return nil, nil
+ }
+ return &resp, nil
+}
+
+// Metadata returns metadata associated with the long-running operation.
+// Metadata itself does not contact the server, but Poll does.
+// To get the latest metadata, call this method after a successful call to Poll.
+// If the metadata is not available, the returned metadata and error are both nil.
+func (op *UpdateInstancePartitionOperation) Metadata() (*instancepb.UpdateInstancePartitionMetadata, error) {
+ var meta instancepb.UpdateInstancePartitionMetadata
+ if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
+ return nil, nil
+ } else if err != nil {
+ return nil, err
+ }
+ return &meta, nil
+}
+
+// Done reports whether the long-running operation has completed.
+func (op *UpdateInstancePartitionOperation) Done() bool {
+ return op.lro.Done()
+}
+
+// Name returns the name of the long-running operation.
+// The name is assigned by the server and is unique within the service from which the operation is created.
+func (op *UpdateInstancePartitionOperation) Name() string {
+ return op.lro.Name()
+}
+
+// InstanceConfigIterator manages a stream of *instancepb.InstanceConfig.
+type InstanceConfigIterator struct {
+ items []*instancepb.InstanceConfig
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) {
+ var item *instancepb.InstanceConfig
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *InstanceConfigIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *InstanceConfigIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// InstanceIterator manages a stream of *instancepb.Instance.
+type InstanceIterator struct {
+ items []*instancepb.Instance
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *InstanceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *InstanceIterator) Next() (*instancepb.Instance, error) {
+ var item *instancepb.Instance
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *InstanceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *InstanceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// InstancePartitionIterator manages a stream of *instancepb.InstancePartition.
+type InstancePartitionIterator struct {
+ items []*instancepb.InstancePartition
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstancePartition, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *InstancePartitionIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *InstancePartitionIterator) Next() (*instancepb.InstancePartition, error) {
+ var item *instancepb.InstancePartition
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *InstancePartitionIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *InstancePartitionIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// OperationIterator manages a stream of *longrunningpb.Operation.
+type OperationIterator struct {
+ items []*longrunningpb.Operation
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *OperationIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
+ var item *longrunningpb.Operation
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *OperationIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *OperationIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go
new file mode 100644
index 000000000..4f103493b
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go
@@ -0,0 +1,125 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+// Package instance is an auto-generated package for the
+// Cloud Spanner Instance Admin API.
+//
+// # General documentation
+//
+// For information that is relevant for all client libraries please reference
+// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
+// page includes:
+//
+// - [Authentication and Authorization]
+// - [Timeouts and Cancellation]
+// - [Testing against Client Libraries]
+// - [Debugging Client Libraries]
+// - [Inspecting errors]
+//
+// # Example usage
+//
+// To get started with this package, create a client.
+//
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := instance.NewInstanceAdminClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// The client will use your default application credentials. Clients should be reused instead of created as needed.
+// The methods of Client are safe for concurrent use by multiple goroutines.
+// The returned client must be Closed when it is done being used.
+//
+// # Using the Client
+//
+// The following is an example of making an API call with the newly created client.
+//
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := instance.NewInstanceAdminClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// req := &instancepb.CreateInstanceRequest{
+// // TODO: Fill request struct fields.
+// // See https://pkg.go.dev/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb#CreateInstanceRequest.
+// }
+// op, err := c.CreateInstance(ctx, req)
+// if err != nil {
+// // TODO: Handle error.
+// }
+//
+// resp, err := op.Wait(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// // TODO: Use resp.
+// _ = resp
+//
+// # Use of Context
+//
+// The ctx passed to NewInstanceAdminClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
+// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
+// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
+// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
+// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
+package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1"
+
+import (
+ "context"
+
+ "google.golang.org/api/option"
+)
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/spanner.admin",
+ }
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go
new file mode 100644
index 000000000..85046d971
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package instance
+
+import (
+ "context"
+ "os"
+
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+)
+
+func init() {
+ newInstanceAdminClientHook = func(ctx context.Context, p clientHookParams) ([]option.ClientOption, error) {
+ if emulator := os.Getenv("SPANNER_EMULATOR_HOST"); emulator != "" {
+ return []option.ClientOption{
+ option.WithEndpoint(emulator),
+ option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
+ option.WithoutAuthentication(),
+ }, nil
+ }
+
+ return nil, nil
+ }
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go
new file mode 100644
index 000000000..30b533f6f
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go
@@ -0,0 +1,3660 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package instance
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "net/url"
+ "time"
+
+ iampb "cloud.google.com/go/iam/apiv1/iampb"
+ "cloud.google.com/go/longrunning"
+ lroauto "cloud.google.com/go/longrunning/autogen"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ instancepb "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/googleapi"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ httptransport "google.golang.org/api/transport/http"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+var newInstanceAdminClientHook clientHook
+
+// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient.
+type InstanceAdminCallOptions struct {
+ ListInstanceConfigs []gax.CallOption
+ GetInstanceConfig []gax.CallOption
+ CreateInstanceConfig []gax.CallOption
+ UpdateInstanceConfig []gax.CallOption
+ DeleteInstanceConfig []gax.CallOption
+ ListInstanceConfigOperations []gax.CallOption
+ ListInstances []gax.CallOption
+ ListInstancePartitions []gax.CallOption
+ GetInstance []gax.CallOption
+ CreateInstance []gax.CallOption
+ UpdateInstance []gax.CallOption
+ DeleteInstance []gax.CallOption
+ SetIamPolicy []gax.CallOption
+ GetIamPolicy []gax.CallOption
+ TestIamPermissions []gax.CallOption
+ GetInstancePartition []gax.CallOption
+ CreateInstancePartition []gax.CallOption
+ DeleteInstancePartition []gax.CallOption
+ UpdateInstancePartition []gax.CallOption
+ ListInstancePartitionOperations []gax.CallOption
+ MoveInstance []gax.CallOption
+}
+
+func defaultInstanceAdminGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("spanner.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("spanner.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("spanner.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions {
+ return &InstanceAdminCallOptions{
+ ListInstanceConfigs: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetInstanceConfig: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateInstanceConfig: []gax.CallOption{},
+ UpdateInstanceConfig: []gax.CallOption{},
+ DeleteInstanceConfig: []gax.CallOption{},
+ ListInstanceConfigOperations: []gax.CallOption{},
+ ListInstances: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListInstancePartitions: []gax.CallOption{},
+ GetInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ UpdateInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ DeleteInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ SetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ codes.DeadlineExceeded,
+ }, gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ TestIamPermissions: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetInstancePartition: []gax.CallOption{},
+ CreateInstancePartition: []gax.CallOption{},
+ DeleteInstancePartition: []gax.CallOption{},
+ UpdateInstancePartition: []gax.CallOption{},
+ ListInstancePartitionOperations: []gax.CallOption{},
+ MoveInstance: []gax.CallOption{},
+ }
+}
+
+func defaultInstanceAdminRESTCallOptions() *InstanceAdminCallOptions {
+ return &InstanceAdminCallOptions{
+ ListInstanceConfigs: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ GetInstanceConfig: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ CreateInstanceConfig: []gax.CallOption{},
+ UpdateInstanceConfig: []gax.CallOption{},
+ DeleteInstanceConfig: []gax.CallOption{},
+ ListInstanceConfigOperations: []gax.CallOption{},
+ ListInstances: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ ListInstancePartitions: []gax.CallOption{},
+ GetInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ CreateInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ UpdateInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ },
+ DeleteInstance: []gax.CallOption{
+ gax.WithTimeout(3600000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ SetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetIamPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnHTTPCodes(gax.Backoff{
+ Initial: 1000 * time.Millisecond,
+ Max: 32000 * time.Millisecond,
+ Multiplier: 1.30,
+ },
+ http.StatusServiceUnavailable,
+ http.StatusGatewayTimeout)
+ }),
+ },
+ TestIamPermissions: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetInstancePartition: []gax.CallOption{},
+ CreateInstancePartition: []gax.CallOption{},
+ DeleteInstancePartition: []gax.CallOption{},
+ UpdateInstancePartition: []gax.CallOption{},
+ ListInstancePartitionOperations: []gax.CallOption{},
+ MoveInstance: []gax.CallOption{},
+ }
+}
+
+// internalInstanceAdminClient is an interface that defines the methods available from Cloud Spanner Instance Admin API.
+type internalInstanceAdminClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListInstanceConfigs(context.Context, *instancepb.ListInstanceConfigsRequest, ...gax.CallOption) *InstanceConfigIterator
+ GetInstanceConfig(context.Context, *instancepb.GetInstanceConfigRequest, ...gax.CallOption) (*instancepb.InstanceConfig, error)
+ CreateInstanceConfig(context.Context, *instancepb.CreateInstanceConfigRequest, ...gax.CallOption) (*CreateInstanceConfigOperation, error)
+ CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation
+ UpdateInstanceConfig(context.Context, *instancepb.UpdateInstanceConfigRequest, ...gax.CallOption) (*UpdateInstanceConfigOperation, error)
+ UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation
+ DeleteInstanceConfig(context.Context, *instancepb.DeleteInstanceConfigRequest, ...gax.CallOption) error
+ ListInstanceConfigOperations(context.Context, *instancepb.ListInstanceConfigOperationsRequest, ...gax.CallOption) *OperationIterator
+ ListInstances(context.Context, *instancepb.ListInstancesRequest, ...gax.CallOption) *InstanceIterator
+ ListInstancePartitions(context.Context, *instancepb.ListInstancePartitionsRequest, ...gax.CallOption) *InstancePartitionIterator
+ GetInstance(context.Context, *instancepb.GetInstanceRequest, ...gax.CallOption) (*instancepb.Instance, error)
+ CreateInstance(context.Context, *instancepb.CreateInstanceRequest, ...gax.CallOption) (*CreateInstanceOperation, error)
+ CreateInstanceOperation(name string) *CreateInstanceOperation
+ UpdateInstance(context.Context, *instancepb.UpdateInstanceRequest, ...gax.CallOption) (*UpdateInstanceOperation, error)
+ UpdateInstanceOperation(name string) *UpdateInstanceOperation
+ DeleteInstance(context.Context, *instancepb.DeleteInstanceRequest, ...gax.CallOption) error
+ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
+ TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
+ GetInstancePartition(context.Context, *instancepb.GetInstancePartitionRequest, ...gax.CallOption) (*instancepb.InstancePartition, error)
+ CreateInstancePartition(context.Context, *instancepb.CreateInstancePartitionRequest, ...gax.CallOption) (*CreateInstancePartitionOperation, error)
+ CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation
+ DeleteInstancePartition(context.Context, *instancepb.DeleteInstancePartitionRequest, ...gax.CallOption) error
+ UpdateInstancePartition(context.Context, *instancepb.UpdateInstancePartitionRequest, ...gax.CallOption) (*UpdateInstancePartitionOperation, error)
+ UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation
+ ListInstancePartitionOperations(context.Context, *instancepb.ListInstancePartitionOperationsRequest, ...gax.CallOption) *OperationIterator
+ MoveInstance(context.Context, *instancepb.MoveInstanceRequest, ...gax.CallOption) (*MoveInstanceOperation, error)
+ MoveInstanceOperation(name string) *MoveInstanceOperation
+}
+
+// InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// # Cloud Spanner Instance Admin API
+//
+// The Cloud Spanner Instance Admin API can be used to create, delete,
+// modify and list instances. Instances are dedicated Cloud Spanner serving
+// and storage resources to be used by Cloud Spanner databases.
+//
+// Each instance has a “configuration”, which dictates where the
+// serving resources for the Cloud Spanner instance are located (e.g.,
+// US-central, Europe). Configurations are created by Google based on
+// resource availability.
+//
+// Cloud Spanner billing is based on the instances that exist and their
+// sizes. After an instance exists, there are no additional
+// per-database or per-operation charges for use of the instance
+// (though there may be additional network bandwidth charges).
+// Instances offer isolation: problems with databases in one instance
+// will not affect other instances. However, within an instance
+// databases can affect each other. For example, if one database in an
+// instance receives a lot of requests and consumes most of the
+// instance resources, fewer resources are available for other
+// databases in that instance, and their performance may suffer.
+type InstanceAdminClient struct {
+ // The internal transport-dependent client.
+ internalClient internalInstanceAdminClient
+
+ // The call options for this service.
+ CallOptions *InstanceAdminCallOptions
+
+ // LROClient is used internally to handle long-running operations.
+ // It is exposed so that its CallOptions can be modified if required.
+ // Users should not Close this client.
+ LROClient *lroauto.OperationsClient
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *InstanceAdminClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *InstanceAdminClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *InstanceAdminClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListInstanceConfigs lists the supported instance configurations for a given project.
+func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator {
+ return c.internalClient.ListInstanceConfigs(ctx, req, opts...)
+}
+
+// GetInstanceConfig gets information about a particular instance configuration.
+func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+ return c.internalClient.GetInstanceConfig(ctx, req, opts...)
+}
+
+// CreateInstanceConfig creates an instance configuration and begins preparing it to be used. The
+// returned [long-running operation][google.longrunning.Operation]
+// can be used to track the progress of preparing the new
+// instance configuration. The instance configuration name is assigned by the
+// caller. If the named instance configuration already exists,
+// CreateInstanceConfig returns ALREADY_EXISTS.
+//
+// Immediately after the request returns:
+//
+// The instance configuration is readable via the API, with all requested
+// attributes. The instance configuration’s
+// reconciling
+// field is set to true. Its state is CREATING.
+//
+// While the operation is pending:
+//
+// Cancelling the operation renders the instance configuration immediately
+// unreadable via the API.
+//
+// Except for deleting the creating resource, all other attempts to modify
+// the instance configuration are rejected.
+//
+// Upon completion of the returned operation:
+//
+// Instances can be created using the instance configuration.
+//
+// The instance configuration’s
+// reconciling
+// field becomes false. Its state becomes READY.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// <instance_config_name>/operations/<operation_id> and can be used to track
+// creation of the instance configuration. The
+// metadata field type is
+// CreateInstanceConfigMetadata.
+// The response field type is
+// InstanceConfig, if
+// successful.
+//
+// Authorization requires spanner.instanceConfigs.create permission on
+// the resource
+// parent.
+func (c *InstanceAdminClient) CreateInstanceConfig(ctx context.Context, req *instancepb.CreateInstanceConfigRequest, opts ...gax.CallOption) (*CreateInstanceConfigOperation, error) {
+ return c.internalClient.CreateInstanceConfig(ctx, req, opts...)
+}
+
+// CreateInstanceConfigOperation returns a new CreateInstanceConfigOperation from a given name.
+// The name must be that of a previously created CreateInstanceConfigOperation, possibly from a different process.
+func (c *InstanceAdminClient) CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation {
+ return c.internalClient.CreateInstanceConfigOperation(name)
+}
+
+// UpdateInstanceConfig updates an instance configuration. The returned
+// [long-running operation][google.longrunning.Operation] can be used to track
+// the progress of updating the instance. If the named instance configuration
+// does not exist, returns NOT_FOUND.
+//
+// Only user-managed configurations can be updated.
+//
+// Immediately after the request returns:
+//
+// The instance configuration’s
+// reconciling
+// field is set to true.
+//
+// While the operation is pending:
+//
+// Cancelling the operation sets its metadata’s
+// cancel_time.
+// The operation is guaranteed to succeed at undoing all changes, after
+// which point it terminates with a CANCELLED status.
+//
+// All other attempts to modify the instance configuration are rejected.
+//
+// Reading the instance configuration via the API continues to give the
+// pre-request values.
+//
+// Upon completion of the returned operation:
+//
+// Creating instances using the instance configuration uses the new
+// values.
+//
+// The new values of the instance configuration are readable via the API.
+//
+// The instance configuration’s
+// reconciling
+// field becomes false.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// <instance_config_name>/operations/<operation_id> and can be used to track
+// the instance configuration modification. The
+// metadata field type is
+// UpdateInstanceConfigMetadata.
+// The response field type is
+// InstanceConfig, if
+// successful.
+//
+// Authorization requires spanner.instanceConfigs.update permission on
+// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
+func (c *InstanceAdminClient) UpdateInstanceConfig(ctx context.Context, req *instancepb.UpdateInstanceConfigRequest, opts ...gax.CallOption) (*UpdateInstanceConfigOperation, error) {
+ return c.internalClient.UpdateInstanceConfig(ctx, req, opts...)
+}
+
+// UpdateInstanceConfigOperation returns a new UpdateInstanceConfigOperation from a given name.
+// The name must be that of a previously created UpdateInstanceConfigOperation, possibly from a different process.
+func (c *InstanceAdminClient) UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation {
+ return c.internalClient.UpdateInstanceConfigOperation(name)
+}
+
+// DeleteInstanceConfig deletes the instance configuration. Deletion is only allowed when no
+// instances are using the configuration. If any instances are using
+// the configuration, returns FAILED_PRECONDITION.
+//
+// Only user-managed configurations can be deleted.
+//
+// Authorization requires spanner.instanceConfigs.delete permission on
+// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
+func (c *InstanceAdminClient) DeleteInstanceConfig(ctx context.Context, req *instancepb.DeleteInstanceConfigRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteInstanceConfig(ctx, req, opts...)
+}
+
+// ListInstanceConfigOperations lists the user-managed instance configuration [long-running
+// operations][google.longrunning.Operation] in the given project. An instance
+// configuration operation has a name of the form
+// projects/<project>/instanceConfigs/<instance_config>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations. Operations returned are ordered by
+// operation.metadata.value.start_time in descending order starting
+// from the most recently started operation.
+func (c *InstanceAdminClient) ListInstanceConfigOperations(ctx context.Context, req *instancepb.ListInstanceConfigOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ return c.internalClient.ListInstanceConfigOperations(ctx, req, opts...)
+}
+
+// ListInstances lists all instances in the given project.
+func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
+ return c.internalClient.ListInstances(ctx, req, opts...)
+}
+
+// ListInstancePartitions lists all instance partitions for the given instance.
+func (c *InstanceAdminClient) ListInstancePartitions(ctx context.Context, req *instancepb.ListInstancePartitionsRequest, opts ...gax.CallOption) *InstancePartitionIterator {
+ return c.internalClient.ListInstancePartitions(ctx, req, opts...)
+}
+
+// GetInstance gets information about a particular instance.
+func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) {
+ return c.internalClient.GetInstance(ctx, req, opts...)
+}
+
+// CreateInstance creates an instance and begins preparing it to begin serving. The
+// returned [long-running operation][google.longrunning.Operation]
+// can be used to track the progress of preparing the new
+// instance. The instance name is assigned by the caller. If the
+// named instance already exists, CreateInstance returns
+// ALREADY_EXISTS.
+//
+// Immediately upon completion of this request:
+//
+// The instance is readable via the API, with all requested attributes
+// but no allocated resources. Its state is CREATING.
+//
+// Until completion of the returned operation:
+//
+// Cancelling the operation renders the instance immediately unreadable
+// via the API.
+//
+// The instance can be deleted.
+//
+// All other attempts to modify the instance are rejected.
+//
+// Upon completion of the returned operation:
+//
+// Billing for all successfully-allocated resources begins (some types
+// may have lower than the requested levels).
+//
+// Databases can be created in the instance.
+//
+// The instance’s allocated resource levels are readable via the API.
+//
+// The instance’s state becomes READY.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format <instance_name>/operations/<operation_id> and
+// can be used to track creation of the instance. The
+// metadata field type is
+// CreateInstanceMetadata.
+// The response field type is
+// Instance, if successful.
+func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
+ return c.internalClient.CreateInstance(ctx, req, opts...)
+}
+
+// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
+// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
+func (c *InstanceAdminClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
+ return c.internalClient.CreateInstanceOperation(name)
+}
+
+// UpdateInstance updates an instance, and begins allocating or releasing resources
+// as requested. The returned [long-running
+// operation][google.longrunning.Operation] can be used to track the
+// progress of updating the instance. If the named instance does not
+// exist, returns NOT_FOUND.
+//
+// Immediately upon completion of this request:
+//
+// For resource types for which a decrease in the instance’s allocation
+// has been requested, billing is based on the newly-requested level.
+//
+// Until completion of the returned operation:
+//
+// Cancelling the operation sets its metadata’s
+// cancel_time,
+// and begins restoring resources to their pre-request values. The
+// operation is guaranteed to succeed at undoing all resource changes,
+// after which point it terminates with a CANCELLED status.
+//
+// All other attempts to modify the instance are rejected.
+//
+// Reading the instance via the API continues to give the pre-request
+// resource levels.
+//
+// Upon completion of the returned operation:
+//
+// Billing begins for all successfully-allocated resources (some types
+// may have lower than the requested levels).
+//
+// All newly-reserved resources are available for serving the instance’s
+// tables.
+//
+// The instance’s new resource levels are readable via the API.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format <instance_name>/operations/<operation_id> and
+// can be used to track the instance modification. The
+// metadata field type is
+// UpdateInstanceMetadata.
+// The response field type is
+// Instance, if successful.
+//
+// Authorization requires spanner.instances.update permission on
+// the resource [name][google.spanner.admin.instance.v1.Instance.name (at http://google.spanner.admin.instance.v1.Instance.name)].
+func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
+ return c.internalClient.UpdateInstance(ctx, req, opts...)
+}
+
+// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
+// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
+func (c *InstanceAdminClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
+ return c.internalClient.UpdateInstanceOperation(name)
+}
+
+// DeleteInstance deletes an instance.
+//
+// Immediately upon completion of the request:
+//
+// Billing ceases for all of the instance’s reserved resources.
+//
+// Soon afterward:
+//
+// The instance and all of its databases immediately and
+// irrevocably disappear from the API. All data in the databases
+// is permanently deleted.
+func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteInstance(ctx, req, opts...)
+}
+
+// SetIamPolicy sets the access control policy on an instance resource. Replaces any
+// existing policy.
+//
+// Authorization requires spanner.instances.setIamPolicy on
+// resource.
+func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.SetIamPolicy(ctx, req, opts...)
+}
+
+// GetIamPolicy gets the access control policy for an instance resource. Returns an empty
+// policy if an instance exists but does not have a policy set.
+//
+// Authorization requires spanner.instances.getIamPolicy on
+// resource.
+func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ return c.internalClient.GetIamPolicy(ctx, req, opts...)
+}
+
+// TestIamPermissions returns permissions that the caller has on the specified instance resource.
+//
+// Attempting this RPC on a non-existent Cloud Spanner instance resource will
+// result in a NOT_FOUND error if the user has spanner.instances.list
+// permission on the containing Google Cloud Project. Otherwise returns an
+// empty set of permissions.
+func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ return c.internalClient.TestIamPermissions(ctx, req, opts...)
+}
+
+// GetInstancePartition gets information about a particular instance partition.
+func (c *InstanceAdminClient) GetInstancePartition(ctx context.Context, req *instancepb.GetInstancePartitionRequest, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
+ return c.internalClient.GetInstancePartition(ctx, req, opts...)
+}
+
+// CreateInstancePartition creates an instance partition and begins preparing it to be used. The
+// returned [long-running operation][google.longrunning.Operation]
+// can be used to track the progress of preparing the new instance partition.
+// The instance partition name is assigned by the caller. If the named
+// instance partition already exists, CreateInstancePartition returns
+// ALREADY_EXISTS.
+//
+// Immediately upon completion of this request:
+//
+// The instance partition is readable via the API, with all requested
+// attributes but no allocated resources. Its state is CREATING.
+//
+// Until completion of the returned operation:
+//
+// Cancelling the operation renders the instance partition immediately
+// unreadable via the API.
+//
+// The instance partition can be deleted.
+//
+// All other attempts to modify the instance partition are rejected.
+//
+// Upon completion of the returned operation:
+//
+// Billing for all successfully-allocated resources begins (some types
+// may have lower than the requested levels).
+//
+// Databases can start using this instance partition.
+//
+// The instance partition’s allocated resource levels are readable via the
+// API.
+//
+// The instance partition’s state becomes READY.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// <instance_partition_name>/operations/<operation_id> and can be used to
+// track creation of the instance partition. The
+// metadata field type is
+// CreateInstancePartitionMetadata.
+// The response field type is
+// InstancePartition, if
+// successful.
+func (c *InstanceAdminClient) CreateInstancePartition(ctx context.Context, req *instancepb.CreateInstancePartitionRequest, opts ...gax.CallOption) (*CreateInstancePartitionOperation, error) {
+	// Delegate to the transport-specific internal client, which wraps the
+	// returned long-running operation for polling.
+	return c.internalClient.CreateInstancePartition(ctx, req, opts...)
+}
+
+// CreateInstancePartitionOperation returns a new CreateInstancePartitionOperation from a given name.
+// The name must be that of a previously created CreateInstancePartitionOperation, possibly from a different process.
+func (c *InstanceAdminClient) CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation {
+	// Reconstruct an operation handle from its resource name (e.g. after a
+	// process restart) so polling can resume.
+	return c.internalClient.CreateInstancePartitionOperation(name)
+}
+
+// DeleteInstancePartition deletes an existing instance partition. Requires that the
+// instance partition is not used by any database or backup and is not the
+// default instance partition of an instance.
+//
+// Authorization requires spanner.instancePartitions.delete permission on
+// the resource
+// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
+func (c *InstanceAdminClient) DeleteInstancePartition(ctx context.Context, req *instancepb.DeleteInstancePartitionRequest, opts ...gax.CallOption) error {
+	// Delegate to the transport-specific internal client (gRPC or REST).
+	return c.internalClient.DeleteInstancePartition(ctx, req, opts...)
+}
+
+// UpdateInstancePartition updates an instance partition, and begins allocating or releasing resources
+// as requested. The returned [long-running
+// operation][google.longrunning.Operation] can be used to track the
+// progress of updating the instance partition. If the named instance
+// partition does not exist, returns NOT_FOUND.
+//
+// Immediately upon completion of this request:
+//
+// For resource types for which a decrease in the instance partition’s
+// allocation has been requested, billing is based on the newly-requested
+// level.
+//
+// Until completion of the returned operation:
+//
+// Cancelling the operation sets its metadata’s
+// cancel_time,
+// and begins restoring resources to their pre-request values. The
+// operation is guaranteed to succeed at undoing all resource changes,
+// after which point it terminates with a CANCELLED status.
+//
+// All other attempts to modify the instance partition are rejected.
+//
+// Reading the instance partition via the API continues to give the
+// pre-request resource levels.
+//
+// Upon completion of the returned operation:
+//
+// Billing begins for all successfully-allocated resources (some types
+// may have lower than the requested levels).
+//
+// All newly-reserved resources are available for serving the instance
+// partition’s tables.
+//
+// The instance partition’s new resource levels are readable via the API.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// <instance_partition_name>/operations/<operation_id> and can be used to
+// track the instance partition modification. The
+// metadata field type is
+// UpdateInstancePartitionMetadata.
+// The response field type is
+// InstancePartition, if
+// successful.
+//
+// Authorization requires spanner.instancePartitions.update permission on
+// the resource
+// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
+func (c *InstanceAdminClient) UpdateInstancePartition(ctx context.Context, req *instancepb.UpdateInstancePartitionRequest, opts ...gax.CallOption) (*UpdateInstancePartitionOperation, error) {
+	// Delegate to the transport-specific internal client, which wraps the
+	// returned long-running operation for polling.
+	return c.internalClient.UpdateInstancePartition(ctx, req, opts...)
+}
+
+// UpdateInstancePartitionOperation returns a new UpdateInstancePartitionOperation from a given name.
+// The name must be that of a previously created UpdateInstancePartitionOperation, possibly from a different process.
+func (c *InstanceAdminClient) UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation {
+	// Reconstruct an operation handle from its resource name (e.g. after a
+	// process restart) so polling can resume.
+	return c.internalClient.UpdateInstancePartitionOperation(name)
+}
+
+// ListInstancePartitionOperations lists instance partition [long-running
+// operations][google.longrunning.Operation] in the given instance.
+// An instance partition operation has a name of the form
+// projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations. Operations returned are ordered by
+// operation.metadata.value.start_time in descending order starting from the
+// most recently started operation.
+//
+// Authorization requires spanner.instancePartitionOperations.list
+// permission on the resource
+// parent.
+func (c *InstanceAdminClient) ListInstancePartitionOperations(ctx context.Context, req *instancepb.ListInstancePartitionOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+	// Delegate to the transport-specific internal client; errors surface
+	// through the returned iterator's Next method.
+	return c.internalClient.ListInstancePartitionOperations(ctx, req, opts...)
+}
+
+// MoveInstance moves an instance to the target instance configuration. You can use the
+// returned [long-running operation][google.longrunning.Operation] to track
+// the progress of moving the instance.
+//
+// MoveInstance returns FAILED_PRECONDITION if the instance meets any of
+// the following criteria:
+//
+// Is undergoing a move to a different instance configuration
+//
+// Has backups
+//
+// Has an ongoing update
+//
+// Contains any CMEK-enabled databases
+//
+// Is a free trial instance
+//
+// While the operation is pending:
+//
+// All other attempts to modify the instance, including changes to its
+// compute capacity, are rejected.
+//
+// The following database and backup admin operations are rejected:
+//
+// DatabaseAdmin.CreateDatabase
+//
+// DatabaseAdmin.UpdateDatabaseDdl (disabled if default_leader is
+// specified in the request.)
+//
+// DatabaseAdmin.RestoreDatabase
+//
+// DatabaseAdmin.CreateBackup
+//
+// DatabaseAdmin.CopyBackup
+//
+// Both the source and target instance configurations are subject to
+// hourly compute and storage charges.
+//
+// The instance might experience higher read-write latencies and a higher
+// transaction abort rate. However, moving an instance doesn’t cause any
+// downtime.
+//
+// The returned [long-running operation][google.longrunning.Operation] has
+// a name of the format
+// <instance_name>/operations/<operation_id> and can be used to track
+// the move instance operation. The
+// metadata field type is
+// MoveInstanceMetadata.
+// The response field type is
+// Instance,
+// if successful.
+// Cancelling the operation sets its metadata’s
+// cancel_time.
+// Cancellation is not immediate because it involves moving any data
+// previously moved to the target instance configuration back to the original
+// instance configuration. You can use this operation to track the progress of
+// the cancellation. Upon successful completion of the cancellation, the
+// operation terminates with CANCELLED status.
+//
+// If not cancelled, upon completion of the returned operation:
+//
+// The instance successfully moves to the target instance
+// configuration.
+//
+// You are billed for compute and storage in target instance
+// configuration.
+//
+// Authorization requires the spanner.instances.update permission on
+// the resource instance.
+//
+// For more details, see
+// Move an instance (at https://cloud.google.com/spanner/docs/move-instance).
+func (c *InstanceAdminClient) MoveInstance(ctx context.Context, req *instancepb.MoveInstanceRequest, opts ...gax.CallOption) (*MoveInstanceOperation, error) {
+	// Delegate to the transport-specific internal client, which wraps the
+	// returned long-running operation for polling.
+	return c.internalClient.MoveInstance(ctx, req, opts...)
+}
+
+// MoveInstanceOperation returns a new MoveInstanceOperation from a given name.
+// The name must be that of a previously created MoveInstanceOperation, possibly from a different process.
+func (c *InstanceAdminClient) MoveInstanceOperation(name string) *MoveInstanceOperation {
+	// Reconstruct an operation handle from its resource name (e.g. after a
+	// process restart) so polling can resume.
+	return c.internalClient.MoveInstanceOperation(name)
+}
+
+// instanceAdminGRPCClient is a client for interacting with Cloud Spanner Instance Admin API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type instanceAdminGRPCClient struct {
+	// Connection pool of gRPC connections to the service.
+	connPool gtransport.ConnPool
+
+	// Points back to the CallOptions field of the containing InstanceAdminClient.
+	// Double indirection lets callers mutate options after construction and
+	// have them observed here.
+	CallOptions **InstanceAdminCallOptions
+
+	// The gRPC API client (generated protobuf stub).
+	instanceAdminClient instancepb.InstanceAdminClient
+
+	// LROClient is used internally to handle long-running operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient **lroauto.OperationsClient
+
+	// The x-goog-* metadata to be sent with each request.
+	xGoogHeaders []string
+}
+
+// NewInstanceAdminClient creates a new instance admin client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// # Cloud Spanner Instance Admin API
+//
+// The Cloud Spanner Instance Admin API can be used to create, delete,
+// modify and list instances. Instances are dedicated Cloud Spanner serving
+// and storage resources to be used by Cloud Spanner databases.
+//
+// Each instance has a “configuration”, which dictates where the
+// serving resources for the Cloud Spanner instance are located (e.g.,
+// US-central, Europe). Configurations are created by Google based on
+// resource availability.
+//
+// Cloud Spanner billing is based on the instances that exist and their
+// sizes. After an instance exists, there are no additional
+// per-database or per-operation charges for use of the instance
+// (though there may be additional network bandwidth charges).
+// Instances offer isolation: problems with databases in one instance
+// will not affect other instances. However, within an instance
+// databases can affect each other. For example, if one database in an
+// instance receives a lot of requests and consumes most of the
+// instance resources, fewer resources are available for other
+// databases in that instance, and their performance may suffer.
+func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) {
+	clientOpts := defaultInstanceAdminGRPCClientOptions()
+	if newInstanceAdminClientHook != nil {
+		// An injected hook may contribute extra client options
+		// (used for internal configuration and testing).
+		hookOpts, err := newInstanceAdminClientHook(ctx, clientHookParams{})
+		if err != nil {
+			return nil, err
+		}
+		clientOpts = append(clientOpts, hookOpts...)
+	}
+
+	// Caller-supplied opts are appended last so they override the defaults.
+	connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+	if err != nil {
+		return nil, err
+	}
+	client := InstanceAdminClient{CallOptions: defaultInstanceAdminCallOptions()}
+
+	c := &instanceAdminGRPCClient{
+		connPool:            connPool,
+		instanceAdminClient: instancepb.NewInstanceAdminClient(connPool),
+		CallOptions:         &client.CallOptions,
+	}
+	c.setGoogleClientInfo()
+
+	client.internalClient = c
+
+	// The LRO client reuses the same connection pool; it polls the
+	// long-running operations returned by the admin API.
+	client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
+	if err != nil {
+		// This error "should not happen", since we are just reusing old connection pool
+		// and never actually need to dial.
+		// If this does happen, we could leak connp. However, we cannot close conn:
+		// If the user invoked the constructor with option.WithGRPCConn,
+		// we would close a connection that's still in use.
+		// TODO: investigate error conditions.
+		return nil, err
+	}
+	c.LROClient = &client.LROClient
+	return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *instanceAdminGRPCClient) Connection() *grpc.ClientConn {
+	// Returns one connection from the pool; not necessarily the same one each call.
+	return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *instanceAdminGRPCClient) setGoogleClientInfo(keyval ...string) {
+	// Compose key/value pairs identifying the Go, gapic, gax and grpc
+	// versions; caller-provided pairs are inserted after "gl-go".
+	kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+	kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+	c.xGoogHeaders = []string{
+		"x-goog-api-client", gax.XGoogHeader(kv...),
+	}
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *instanceAdminGRPCClient) Close() error {
+	// Closing the pool closes all underlying gRPC connections.
+	return c.connPool.Close()
+}
+
+// instanceAdminRESTClient is a client for interacting with Cloud Spanner Instance Admin API over REST transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type instanceAdminRESTClient struct {
+	// The http endpoint to connect to.
+	endpoint string
+
+	// The http client.
+	httpClient *http.Client
+
+	// LROClient is used internally to handle long-running operations.
+	// It is exposed so that its CallOptions can be modified if required.
+	// Users should not Close this client.
+	LROClient **lroauto.OperationsClient
+
+	// The x-goog-* headers to be sent with each request.
+	xGoogHeaders []string
+
+	// Points back to the CallOptions field of the containing InstanceAdminClient
+	CallOptions **InstanceAdminCallOptions
+}
+
+// NewInstanceAdminRESTClient creates a new instance admin rest client.
+//
+// # Cloud Spanner Instance Admin API
+//
+// The Cloud Spanner Instance Admin API can be used to create, delete,
+// modify and list instances. Instances are dedicated Cloud Spanner serving
+// and storage resources to be used by Cloud Spanner databases.
+//
+// Each instance has a “configuration”, which dictates where the
+// serving resources for the Cloud Spanner instance are located (e.g.,
+// US-central, Europe). Configurations are created by Google based on
+// resource availability.
+//
+// Cloud Spanner billing is based on the instances that exist and their
+// sizes. After an instance exists, there are no additional
+// per-database or per-operation charges for use of the instance
+// (though there may be additional network bandwidth charges).
+// Instances offer isolation: problems with databases in one instance
+// will not affect other instances. However, within an instance
+// databases can affect each other. For example, if one database in an
+// instance receives a lot of requests and consumes most of the
+// instance resources, fewer resources are available for other
+// databases in that instance, and their performance may suffer.
+func NewInstanceAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) {
+	// Caller-supplied opts are appended last so they override the defaults.
+	clientOpts := append(defaultInstanceAdminRESTClientOptions(), opts...)
+	httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
+	if err != nil {
+		return nil, err
+	}
+
+	callOpts := defaultInstanceAdminRESTCallOptions()
+	c := &instanceAdminRESTClient{
+		endpoint:    endpoint,
+		httpClient:  httpClient,
+		CallOptions: &callOpts,
+	}
+	c.setGoogleClientInfo()
+
+	// The LRO client shares the same HTTP client and endpoint; it polls the
+	// long-running operations returned by the admin API.
+	lroOpts := []option.ClientOption{
+		option.WithHTTPClient(httpClient),
+		option.WithEndpoint(endpoint),
+	}
+	opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...)
+	if err != nil {
+		return nil, err
+	}
+	c.LROClient = &opClient
+
+	return &InstanceAdminClient{internalClient: c, CallOptions: callOpts}, nil
+}
+
+// defaultInstanceAdminRESTClientOptions returns the default client options
+// (endpoints, audience and auth scopes) for the REST transport.
+func defaultInstanceAdminRESTClientOptions() []option.ClientOption {
+	return []option.ClientOption{
+		internaloption.WithDefaultEndpoint("https://spanner.googleapis.com"),
+		internaloption.WithDefaultEndpointTemplate("https://spanner.UNIVERSE_DOMAIN"),
+		internaloption.WithDefaultMTLSEndpoint("https://spanner.mtls.googleapis.com"),
+		internaloption.WithDefaultUniverseDomain("googleapis.com"),
+		internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
+		internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+	}
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *instanceAdminRESTClient) setGoogleClientInfo(keyval ...string) {
+	// Same header shape as the gRPC client, but the transport token is
+	// "rest" and no transport version is available.
+	kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+	kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
+	c.xGoogHeaders = []string{
+		"x-goog-api-client", gax.XGoogHeader(kv...),
+	}
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *instanceAdminRESTClient) Close() error {
+	// Replace httpClient with nil to force cleanup.
+	// (There is no connection pool to close for the REST transport.)
+	c.httpClient = nil
+	return nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: This method always returns nil.
+func (c *instanceAdminRESTClient) Connection() *grpc.ClientConn {
+	// REST transport has no gRPC connection to expose.
+	return nil
+}
+func (c *instanceAdminGRPCClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator {
+	// Routing header carries the request's parent so the backend can route the call.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	// Full-slice expression protects the default call options from being
+	// mutated when caller-supplied opts are appended.
+	opts = append((*c.CallOptions).ListInstanceConfigs[0:len((*c.CallOptions).ListInstanceConfigs):len((*c.CallOptions).ListInstanceConfigs)], opts...)
+	it := &InstanceConfigIterator{}
+	// Clone the request so page-token/page-size mutations below do not
+	// affect the caller's copy.
+	req = proto.Clone(req).(*instancepb.ListInstanceConfigsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) {
+		resp := &instancepb.ListInstanceConfigsResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		// Clamp the page size to the int32 range of the proto field.
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetInstanceConfigs(), resp.GetNextPageToken(), nil
+	}
+	// fetch adapts InternalFetch to the iterator.PageInfo contract by
+	// buffering items on the iterator.
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *instanceAdminGRPCClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+	// Routing header carries the target resource name.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	// Full-slice expression protects the default call options from mutation.
+	opts = append((*c.CallOptions).GetInstanceConfig[0:len((*c.CallOptions).GetInstanceConfig):len((*c.CallOptions).GetInstanceConfig)], opts...)
+	var resp *instancepb.InstanceConfig
+	// gax.Invoke applies the retry/timeout policy from opts around the RPC.
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *instanceAdminGRPCClient) CreateInstanceConfig(ctx context.Context, req *instancepb.CreateInstanceConfigRequest, opts ...gax.CallOption) (*CreateInstanceConfigOperation, error) {
+	// Routing header carries the request's parent.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).CreateInstanceConfig[0:len((*c.CallOptions).CreateInstanceConfig):len((*c.CallOptions).CreateInstanceConfig)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.CreateInstanceConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// Wrap the raw operation so callers can poll/wait via the typed handle.
+	return &CreateInstanceConfigOperation{
+		lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+	}, nil
+}
+
+func (c *instanceAdminGRPCClient) UpdateInstanceConfig(ctx context.Context, req *instancepb.UpdateInstanceConfigRequest, opts ...gax.CallOption) (*UpdateInstanceConfigOperation, error) {
+	// Routing header is keyed on the nested instance_config.name field.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_config.name", url.QueryEscape(req.GetInstanceConfig().GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).UpdateInstanceConfig[0:len((*c.CallOptions).UpdateInstanceConfig):len((*c.CallOptions).UpdateInstanceConfig)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.UpdateInstanceConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// Wrap the raw operation so callers can poll/wait via the typed handle.
+	return &UpdateInstanceConfigOperation{
+		lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+	}, nil
+}
+
+func (c *instanceAdminGRPCClient) DeleteInstanceConfig(ctx context.Context, req *instancepb.DeleteInstanceConfigRequest, opts ...gax.CallOption) error {
+	// Routing header carries the target resource name.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).DeleteInstanceConfig[0:len((*c.CallOptions).DeleteInstanceConfig):len((*c.CallOptions).DeleteInstanceConfig)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		// Response is google.protobuf.Empty; only the error matters.
+		_, err = c.instanceAdminClient.DeleteInstanceConfig(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+func (c *instanceAdminGRPCClient) ListInstanceConfigOperations(ctx context.Context, req *instancepb.ListInstanceConfigOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+	// Routing header carries the request's parent.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).ListInstanceConfigOperations[0:len((*c.CallOptions).ListInstanceConfigOperations):len((*c.CallOptions).ListInstanceConfigOperations)], opts...)
+	it := &OperationIterator{}
+	// Clone the request so pagination mutations do not affect the caller's copy.
+	req = proto.Clone(req).(*instancepb.ListInstanceConfigOperationsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+		resp := &instancepb.ListInstanceConfigOperationsResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		// Clamp the page size to the int32 range of the proto field.
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.instanceAdminClient.ListInstanceConfigOperations(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetOperations(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *instanceAdminGRPCClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
+	// Routing header carries the request's parent.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).ListInstances[0:len((*c.CallOptions).ListInstances):len((*c.CallOptions).ListInstances)], opts...)
+	it := &InstanceIterator{}
+	// Clone the request so pagination mutations do not affect the caller's copy.
+	req = proto.Clone(req).(*instancepb.ListInstancesRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) {
+		resp := &instancepb.ListInstancesResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		// Clamp the page size to the int32 range of the proto field.
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.instanceAdminClient.ListInstances(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetInstances(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *instanceAdminGRPCClient) ListInstancePartitions(ctx context.Context, req *instancepb.ListInstancePartitionsRequest, opts ...gax.CallOption) *InstancePartitionIterator {
+	// Routing header carries the request's parent.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).ListInstancePartitions[0:len((*c.CallOptions).ListInstancePartitions):len((*c.CallOptions).ListInstancePartitions)], opts...)
+	it := &InstancePartitionIterator{}
+	// Clone the request so pagination mutations do not affect the caller's copy.
+	req = proto.Clone(req).(*instancepb.ListInstancePartitionsRequest)
+	it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstancePartition, string, error) {
+		resp := &instancepb.ListInstancePartitionsResponse{}
+		if pageToken != "" {
+			req.PageToken = pageToken
+		}
+		// Clamp the page size to the int32 range of the proto field.
+		if pageSize > math.MaxInt32 {
+			req.PageSize = math.MaxInt32
+		} else if pageSize != 0 {
+			req.PageSize = int32(pageSize)
+		}
+		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+			var err error
+			resp, err = c.instanceAdminClient.ListInstancePartitions(ctx, req, settings.GRPC...)
+			return err
+		}, opts...)
+		if err != nil {
+			return nil, "", err
+		}
+
+		it.Response = resp
+		return resp.GetInstancePartitions(), resp.GetNextPageToken(), nil
+	}
+	fetch := func(pageSize int, pageToken string) (string, error) {
+		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+		if err != nil {
+			return "", err
+		}
+		it.items = append(it.items, items...)
+		return nextPageToken, nil
+	}
+
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+	it.pageInfo.MaxSize = int(req.GetPageSize())
+	it.pageInfo.Token = req.GetPageToken()
+
+	return it
+}
+
+func (c *instanceAdminGRPCClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) {
+	// Routing header carries the target resource name.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).GetInstance[0:len((*c.CallOptions).GetInstance):len((*c.CallOptions).GetInstance)], opts...)
+	var resp *instancepb.Instance
+	// gax.Invoke applies the retry/timeout policy from opts around the RPC.
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.GetInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+func (c *instanceAdminGRPCClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
+	// Routing header carries the request's parent.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).CreateInstance[0:len((*c.CallOptions).CreateInstance):len((*c.CallOptions).CreateInstance)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.CreateInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// Wrap the raw operation so callers can poll/wait via the typed handle.
+	return &CreateInstanceOperation{
+		lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+	}, nil
+}
+
+func (c *instanceAdminGRPCClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
+	// Routing header is keyed on the nested instance.name field.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance.name", url.QueryEscape(req.GetInstance().GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).UpdateInstance[0:len((*c.CallOptions).UpdateInstance):len((*c.CallOptions).UpdateInstance)], opts...)
+	var resp *longrunningpb.Operation
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.UpdateInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	// Wrap the raw operation so callers can poll/wait via the typed handle.
+	return &UpdateInstanceOperation{
+		lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+	}, nil
+}
+
+func (c *instanceAdminGRPCClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
+	// Routing header carries the target resource name.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).DeleteInstance[0:len((*c.CallOptions).DeleteInstance):len((*c.CallOptions).DeleteInstance)], opts...)
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		// Response is google.protobuf.Empty; only the error matters.
+		_, err = c.instanceAdminClient.DeleteInstance(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	return err
+}
+
+func (c *instanceAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+	// Routing header is keyed on the IAM resource field.
+	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+	hds = append(c.xGoogHeaders, hds...)
+	ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+	opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
+	var resp *iampb.Policy
+	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+		var err error
+		resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req, settings.GRPC...)
+		return err
+	}, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetIamPolicy issues the IAM GetIamPolicy RPC for the resource named in the
+// request and returns its current policy.
+func (c *instanceAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
+ // Routing header keyed by the IAM resource name.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
+ var resp *iampb.Policy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ if err != nil {
+  return nil, err
+ }
+ return resp, nil
+}
+
+// TestIamPermissions issues the IAM TestIamPermissions RPC and returns the
+// subset of requested permissions the caller holds on the resource.
+func (c *instanceAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ // Routing header keyed by the IAM resource name.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
+ var resp *iampb.TestIamPermissionsResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ if err != nil {
+  return nil, err
+ }
+ return resp, nil
+}
+
+// GetInstancePartition issues the GetInstancePartition RPC and returns the
+// partition resource from the server.
+func (c *instanceAdminGRPCClient) GetInstancePartition(ctx context.Context, req *instancepb.GetInstancePartitionRequest, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
+ // Routing header keyed by the partition's resource name.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetInstancePartition[0:len((*c.CallOptions).GetInstancePartition):len((*c.CallOptions).GetInstancePartition)], opts...)
+ var resp *instancepb.InstancePartition
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  resp, err = c.instanceAdminClient.GetInstancePartition(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ if err != nil {
+  return nil, err
+ }
+ return resp, nil
+}
+
+// CreateInstancePartition issues the CreateInstancePartition RPC and wraps the
+// returned long-running operation for polling.
+func (c *instanceAdminGRPCClient) CreateInstancePartition(ctx context.Context, req *instancepb.CreateInstancePartitionRequest, opts ...gax.CallOption) (*CreateInstancePartitionOperation, error) {
+ // Routing header keyed by the parent instance under which the partition is created.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateInstancePartition[0:len((*c.CallOptions).CreateInstancePartition):len((*c.CallOptions).CreateInstancePartition)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  resp, err = c.instanceAdminClient.CreateInstancePartition(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ if err != nil {
+  return nil, err
+ }
+ return &CreateInstancePartitionOperation{
+  lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+// DeleteInstancePartition issues the DeleteInstancePartition RPC; the response
+// message is discarded and only the error is returned.
+func (c *instanceAdminGRPCClient) DeleteInstancePartition(ctx context.Context, req *instancepb.DeleteInstancePartitionRequest, opts ...gax.CallOption) error {
+ // Routing header keyed by the partition name being deleted.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteInstancePartition[0:len((*c.CallOptions).DeleteInstancePartition):len((*c.CallOptions).DeleteInstancePartition)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  _, err = c.instanceAdminClient.DeleteInstancePartition(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ return err
+}
+
+// UpdateInstancePartition issues the UpdateInstancePartition RPC and wraps the
+// returned long-running operation for polling.
+func (c *instanceAdminGRPCClient) UpdateInstancePartition(ctx context.Context, req *instancepb.UpdateInstancePartitionRequest, opts ...gax.CallOption) (*UpdateInstancePartitionOperation, error) {
+ // Routing header keyed by the partition's resource name inside the request.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_partition.name", url.QueryEscape(req.GetInstancePartition().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateInstancePartition[0:len((*c.CallOptions).UpdateInstancePartition):len((*c.CallOptions).UpdateInstancePartition)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  resp, err = c.instanceAdminClient.UpdateInstancePartition(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ if err != nil {
+  return nil, err
+ }
+ return &UpdateInstancePartitionOperation{
+  lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+// ListInstancePartitionOperations returns an iterator over partition
+// long-running operations; pages are fetched lazily through InternalFetch.
+func (c *instanceAdminGRPCClient) ListInstancePartitionOperations(ctx context.Context, req *instancepb.ListInstancePartitionOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ // Routing header keyed by the parent instance.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListInstancePartitionOperations[0:len((*c.CallOptions).ListInstancePartitionOperations):len((*c.CallOptions).ListInstancePartitionOperations)], opts...)
+ it := &OperationIterator{}
+ // Clone so the per-page PageToken/PageSize mutations below never touch the
+ // caller's request.
+ req = proto.Clone(req).(*instancepb.ListInstancePartitionOperationsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+  resp := &instancepb.ListInstancePartitionOperationsResponse{}
+  if pageToken != "" {
+   req.PageToken = pageToken
+  }
+  // Clamp the page size to the proto int32 field's range.
+  if pageSize > math.MaxInt32 {
+   req.PageSize = math.MaxInt32
+  } else if pageSize != 0 {
+   req.PageSize = int32(pageSize)
+  }
+  err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+   var err error
+   resp, err = c.instanceAdminClient.ListInstancePartitionOperations(ctx, req, settings.GRPC...)
+   return err
+  }, opts...)
+  if err != nil {
+   return nil, "", err
+  }
+
+  it.Response = resp
+  return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+  items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+  if err != nil {
+   return "", err
+  }
+  it.items = append(it.items, items...)
+  return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// MoveInstance issues the MoveInstance RPC and wraps the returned long-running
+// operation for polling.
+func (c *instanceAdminGRPCClient) MoveInstance(ctx context.Context, req *instancepb.MoveInstanceRequest, opts ...gax.CallOption) (*MoveInstanceOperation, error) {
+ // Routing header keyed by the instance name being moved.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).MoveInstance[0:len((*c.CallOptions).MoveInstance):len((*c.CallOptions).MoveInstance)], opts...)
+ var resp *longrunningpb.Operation
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  var err error
+  resp, err = c.instanceAdminClient.MoveInstance(ctx, req, settings.GRPC...)
+  return err
+ }, opts...)
+ if err != nil {
+  return nil, err
+ }
+ return &MoveInstanceOperation{
+  lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ }, nil
+}
+
+// ListInstanceConfigs lists the supported instance configurations for a given project.
+//
+// REST transport: each page is fetched with GET /v1/{parent}/instanceConfigs
+// and decoded from JSON via protojson.
+func (c *instanceAdminRESTClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator {
+ it := &InstanceConfigIterator{}
+ // Clone so per-page PageToken/PageSize mutations never touch the caller's request.
+ req = proto.Clone(req).(*instancepb.ListInstanceConfigsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) {
+  resp := &instancepb.ListInstanceConfigsResponse{}
+  if pageToken != "" {
+   req.PageToken = pageToken
+  }
+  // Clamp the page size to the proto int32 field's range.
+  if pageSize > math.MaxInt32 {
+   req.PageSize = math.MaxInt32
+  } else if pageSize != 0 {
+   req.PageSize = int32(pageSize)
+  }
+  baseUrl, err := url.Parse(c.endpoint)
+  if err != nil {
+   return nil, "", err
+  }
+  baseUrl.Path += fmt.Sprintf("/v1/%v/instanceConfigs", req.GetParent())
+
+  params := url.Values{}
+  params.Add("$alt", "json;enum-encoding=int")
+  if req.GetPageSize() != 0 {
+   params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+  }
+  if req.GetPageToken() != "" {
+   params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+  }
+
+  baseUrl.RawQuery = params.Encode()
+
+  // Build HTTP headers from client and context metadata.
+  hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+  headers := gax.BuildHeaders(ctx, hds...)
+  e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+   if settings.Path != "" {
+    baseUrl.Path = settings.Path
+   }
+   httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+   if err != nil {
+    return err
+   }
+   // NOTE(review): unlike GetInstanceConfig, the request is not bound to ctx
+   // via WithContext here; gax.Invoke still observes ctx for retries — confirm
+   // this matches upstream generator output.
+   httpReq.Header = headers
+
+   httpRsp, err := c.httpClient.Do(httpReq)
+   if err != nil {
+    return err
+   }
+   defer httpRsp.Body.Close()
+
+   if err = googleapi.CheckResponse(httpRsp); err != nil {
+    return err
+   }
+
+   buf, err := io.ReadAll(httpRsp.Body)
+   if err != nil {
+    return err
+   }
+
+   if err := unm.Unmarshal(buf, resp); err != nil {
+    return err
+   }
+
+   return nil
+  }, opts...)
+  if e != nil {
+   return nil, "", e
+  }
+  it.Response = resp
+  return resp.GetInstanceConfigs(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+  items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+  if err != nil {
+   return "", err
+  }
+  it.items = append(it.items, items...)
+  return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// GetInstanceConfig gets information about a particular instance configuration.
+//
+// REST transport: issues GET /v1/{name} and decodes the JSON body via protojson.
+func (c *instanceAdminRESTClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+  return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ // Ask the server for JSON with enums encoded as integers.
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetInstanceConfig[0:len((*c.CallOptions).GetInstanceConfig):len((*c.CallOptions).GetInstanceConfig)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &instancepb.InstanceConfig{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  if settings.Path != "" {
+   baseUrl.Path = settings.Path
+  }
+  httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+  if err != nil {
+   return err
+  }
+  httpReq = httpReq.WithContext(ctx)
+  httpReq.Header = headers
+
+  httpRsp, err := c.httpClient.Do(httpReq)
+  if err != nil {
+   return err
+  }
+  defer httpRsp.Body.Close()
+
+  if err = googleapi.CheckResponse(httpRsp); err != nil {
+   return err
+  }
+
+  buf, err := io.ReadAll(httpRsp.Body)
+  if err != nil {
+   return err
+  }
+
+  if err := unm.Unmarshal(buf, resp); err != nil {
+   return err
+  }
+
+  return nil
+ }, opts...)
+ if e != nil {
+  return nil, e
+ }
+ return resp, nil
+}
+
+// CreateInstanceConfig creates an instance configuration and begins preparing it to be used. The
+// returned [long-running operation][google.longrunning.Operation]
+// can be used to track the progress of preparing the new
+// instance configuration. The instance configuration name is assigned by the
+// caller. If the named instance configuration already exists,
+// CreateInstanceConfig returns ALREADY_EXISTS.
+//
+// Immediately after the request returns:
+//
+// The instance configuration is readable via the API, with all requested
+// attributes. The instance configuration’s
+// reconciling
+// field is set to true. Its state is CREATING.
+//
+// While the operation is pending:
+//
+// Cancelling the operation renders the instance configuration immediately
+// unreadable via the API.
+//
+// Except for deleting the creating resource, all other attempts to modify
+// the instance configuration are rejected.
+//
+// Upon completion of the returned operation:
+//
+// Instances can be created using the instance configuration.
+//
+// The instance configuration’s
+// reconciling
+// field becomes false. Its state becomes READY.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// <instance_config_name>/operations/<operation_id> and can be used to track
+// creation of the instance configuration. The
+// metadata field type is
+// CreateInstanceConfigMetadata.
+// The response field type is
+// InstanceConfig, if
+// successful.
+//
+// Authorization requires spanner.instanceConfigs.create permission on
+// the resource
+// parent.
+func (c *instanceAdminRESTClient) CreateInstanceConfig(ctx context.Context, req *instancepb.CreateInstanceConfigRequest, opts ...gax.CallOption) (*CreateInstanceConfigOperation, error) {
+ // Serialize the request body as JSON (enums as integers).
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+  return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+  return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/instanceConfigs", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ // NOTE(review): no per-method CallOptions are merged here (unlike
+ // GetInstanceConfig); only caller-supplied opts apply — confirm this matches
+ // upstream generator output.
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  if settings.Path != "" {
+   baseUrl.Path = settings.Path
+  }
+  httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+  if err != nil {
+   return err
+  }
+  httpReq = httpReq.WithContext(ctx)
+  httpReq.Header = headers
+
+  httpRsp, err := c.httpClient.Do(httpReq)
+  if err != nil {
+   return err
+  }
+  defer httpRsp.Body.Close()
+
+  if err = googleapi.CheckResponse(httpRsp); err != nil {
+   return err
+  }
+
+  buf, err := io.ReadAll(httpRsp.Body)
+  if err != nil {
+   return err
+  }
+
+  if err := unm.Unmarshal(buf, resp); err != nil {
+   return err
+  }
+
+  return nil
+ }, opts...)
+ if e != nil {
+  return nil, e
+ }
+
+ // pollPath overrides the LRO polling URL with the REST operation path.
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &CreateInstanceConfigOperation{
+  lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+  pollPath: override,
+ }, nil
+}
+
+// UpdateInstanceConfig updates an instance configuration. The returned
+// [long-running operation][google.longrunning.Operation] can be used to track
+// the progress of updating the instance. If the named instance configuration
+// does not exist, returns NOT_FOUND.
+//
+// Only user-managed configurations can be updated.
+//
+// Immediately after the request returns:
+//
+// The instance configuration’s
+// reconciling
+// field is set to true.
+//
+// While the operation is pending:
+//
+// Cancelling the operation sets its metadata’s
+// cancel_time.
+// The operation is guaranteed to succeed at undoing all changes, after
+// which point it terminates with a CANCELLED status.
+//
+// All other attempts to modify the instance configuration are rejected.
+//
+// Reading the instance configuration via the API continues to give the
+// pre-request values.
+//
+// Upon completion of the returned operation:
+//
+// Creating instances using the instance configuration uses the new
+// values.
+//
+// The new values of the instance configuration are readable via the API.
+//
+// The instance configuration’s
+// reconciling
+// field becomes false.
+//
+// The returned [long-running operation][google.longrunning.Operation] will
+// have a name of the format
+// <instance_config_name>/operations/<operation_id> and can be used to track
+// the instance configuration modification. The
+// metadata field type is
+// UpdateInstanceConfigMetadata.
+// The response field type is
+// InstanceConfig, if
+// successful.
+//
+// Authorization requires spanner.instanceConfigs.update permission on
+// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
+func (c *instanceAdminRESTClient) UpdateInstanceConfig(ctx context.Context, req *instancepb.UpdateInstanceConfigRequest, opts ...gax.CallOption) (*UpdateInstanceConfigOperation, error) {
+ // Serialize the request body as JSON (enums as integers).
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+  return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+  return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetInstanceConfig().GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_config.name", url.QueryEscape(req.GetInstanceConfig().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  if settings.Path != "" {
+   baseUrl.Path = settings.Path
+  }
+  // PATCH carries the updated config; the field mask travels in the body.
+  httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
+  if err != nil {
+   return err
+  }
+  httpReq = httpReq.WithContext(ctx)
+  httpReq.Header = headers
+
+  httpRsp, err := c.httpClient.Do(httpReq)
+  if err != nil {
+   return err
+  }
+  defer httpRsp.Body.Close()
+
+  if err = googleapi.CheckResponse(httpRsp); err != nil {
+   return err
+  }
+
+  buf, err := io.ReadAll(httpRsp.Body)
+  if err != nil {
+   return err
+  }
+
+  if err := unm.Unmarshal(buf, resp); err != nil {
+   return err
+  }
+
+  return nil
+ }, opts...)
+ if e != nil {
+  return nil, e
+ }
+
+ // pollPath overrides the LRO polling URL with the REST operation path.
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &UpdateInstanceConfigOperation{
+  lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+  pollPath: override,
+ }, nil
+}
+
+// DeleteInstanceConfig deletes the instance configuration. Deletion is only allowed when no
+// instances are using the configuration. If any instances are using
+// the configuration, returns FAILED_PRECONDITION.
+//
+// Only user-managed configurations can be deleted.
+//
+// Authorization requires spanner.instanceConfigs.delete permission on
+// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
+func (c *instanceAdminRESTClient) DeleteInstanceConfig(ctx context.Context, req *instancepb.DeleteInstanceConfigRequest, opts ...gax.CallOption) error {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+  return err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ // Optional etag/validateOnly travel as query parameters on DELETE.
+ if req.GetEtag() != "" {
+  params.Add("etag", fmt.Sprintf("%v", req.GetEtag()))
+ }
+ if req.GetValidateOnly() {
+  params.Add("validateOnly", fmt.Sprintf("%v", req.GetValidateOnly()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  if settings.Path != "" {
+   baseUrl.Path = settings.Path
+  }
+  httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+  if err != nil {
+   return err
+  }
+  httpReq = httpReq.WithContext(ctx)
+  httpReq.Header = headers
+
+  httpRsp, err := c.httpClient.Do(httpReq)
+  if err != nil {
+   return err
+  }
+  defer httpRsp.Body.Close()
+
+  // Returns nil if there is no error, otherwise wraps
+  // the response code and body into a non-nil error
+  return googleapi.CheckResponse(httpRsp)
+ }, opts...)
+}
+
+// ListInstanceConfigOperations lists the user-managed instance configuration [long-running
+// operations][google.longrunning.Operation] in the given project. An instance
+// configuration operation has a name of the form
+// projects/<project>/instanceConfigs/<instance_config>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations. Operations returned are ordered by
+// operation.metadata.value.start_time in descending order starting
+// from the most recently started operation.
+//
+// REST transport: pages are fetched with GET /v1/{parent}/instanceConfigOperations.
+func (c *instanceAdminRESTClient) ListInstanceConfigOperations(ctx context.Context, req *instancepb.ListInstanceConfigOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ it := &OperationIterator{}
+ // Clone so per-page PageToken/PageSize mutations never touch the caller's request.
+ req = proto.Clone(req).(*instancepb.ListInstanceConfigOperationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+  resp := &instancepb.ListInstanceConfigOperationsResponse{}
+  if pageToken != "" {
+   req.PageToken = pageToken
+  }
+  // Clamp the page size to the proto int32 field's range.
+  if pageSize > math.MaxInt32 {
+   req.PageSize = math.MaxInt32
+  } else if pageSize != 0 {
+   req.PageSize = int32(pageSize)
+  }
+  baseUrl, err := url.Parse(c.endpoint)
+  if err != nil {
+   return nil, "", err
+  }
+  baseUrl.Path += fmt.Sprintf("/v1/%v/instanceConfigOperations", req.GetParent())
+
+  params := url.Values{}
+  params.Add("$alt", "json;enum-encoding=int")
+  if req.GetFilter() != "" {
+   params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+  }
+  if req.GetPageSize() != 0 {
+   params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+  }
+  if req.GetPageToken() != "" {
+   params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+  }
+
+  baseUrl.RawQuery = params.Encode()
+
+  // Build HTTP headers from client and context metadata.
+  hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+  headers := gax.BuildHeaders(ctx, hds...)
+  e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+   if settings.Path != "" {
+    baseUrl.Path = settings.Path
+   }
+   httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+   if err != nil {
+    return err
+   }
+   // NOTE(review): request not bound to ctx via WithContext here — confirm
+   // this matches upstream generator output.
+   httpReq.Header = headers
+
+   httpRsp, err := c.httpClient.Do(httpReq)
+   if err != nil {
+    return err
+   }
+   defer httpRsp.Body.Close()
+
+   if err = googleapi.CheckResponse(httpRsp); err != nil {
+    return err
+   }
+
+   buf, err := io.ReadAll(httpRsp.Body)
+   if err != nil {
+    return err
+   }
+
+   if err := unm.Unmarshal(buf, resp); err != nil {
+    return err
+   }
+
+   return nil
+  }, opts...)
+  if e != nil {
+   return nil, "", e
+  }
+  it.Response = resp
+  return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+  items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+  if err != nil {
+   return "", err
+  }
+  it.items = append(it.items, items...)
+  return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// ListInstances lists all instances in the given project.
+//
+// REST transport: pages are fetched with GET /v1/{parent}/instances.
+func (c *instanceAdminRESTClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
+ it := &InstanceIterator{}
+ // Clone so per-page PageToken/PageSize mutations never touch the caller's request.
+ req = proto.Clone(req).(*instancepb.ListInstancesRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) {
+  resp := &instancepb.ListInstancesResponse{}
+  if pageToken != "" {
+   req.PageToken = pageToken
+  }
+  // Clamp the page size to the proto int32 field's range.
+  if pageSize > math.MaxInt32 {
+   req.PageSize = math.MaxInt32
+  } else if pageSize != 0 {
+   req.PageSize = int32(pageSize)
+  }
+  baseUrl, err := url.Parse(c.endpoint)
+  if err != nil {
+   return nil, "", err
+  }
+  baseUrl.Path += fmt.Sprintf("/v1/%v/instances", req.GetParent())
+
+  params := url.Values{}
+  params.Add("$alt", "json;enum-encoding=int")
+  if req.GetFilter() != "" {
+   params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+  }
+  if req.GetInstanceDeadline() != nil {
+   instanceDeadline, err := protojson.Marshal(req.GetInstanceDeadline())
+   if err != nil {
+    return nil, "", err
+   }
+   // Strip protojson's surrounding quotes from the timestamp string.
+   params.Add("instanceDeadline", string(instanceDeadline[1:len(instanceDeadline)-1]))
+  }
+  if req.GetPageSize() != 0 {
+   params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+  }
+  if req.GetPageToken() != "" {
+   params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+  }
+
+  baseUrl.RawQuery = params.Encode()
+
+  // Build HTTP headers from client and context metadata.
+  hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+  headers := gax.BuildHeaders(ctx, hds...)
+  e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+   if settings.Path != "" {
+    baseUrl.Path = settings.Path
+   }
+   httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+   if err != nil {
+    return err
+   }
+   httpReq.Header = headers
+
+   httpRsp, err := c.httpClient.Do(httpReq)
+   if err != nil {
+    return err
+   }
+   defer httpRsp.Body.Close()
+
+   if err = googleapi.CheckResponse(httpRsp); err != nil {
+    return err
+   }
+
+   buf, err := io.ReadAll(httpRsp.Body)
+   if err != nil {
+    return err
+   }
+
+   if err := unm.Unmarshal(buf, resp); err != nil {
+    return err
+   }
+
+   return nil
+  }, opts...)
+  if e != nil {
+   return nil, "", e
+  }
+  it.Response = resp
+  return resp.GetInstances(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+  items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+  if err != nil {
+   return "", err
+  }
+  it.items = append(it.items, items...)
+  return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// ListInstancePartitions lists all instance partitions for the given instance.
+//
+// REST transport: pages are fetched with GET /v1/{parent}/instancePartitions.
+func (c *instanceAdminRESTClient) ListInstancePartitions(ctx context.Context, req *instancepb.ListInstancePartitionsRequest, opts ...gax.CallOption) *InstancePartitionIterator {
+ it := &InstancePartitionIterator{}
+ // Clone so per-page PageToken/PageSize mutations never touch the caller's request.
+ req = proto.Clone(req).(*instancepb.ListInstancePartitionsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstancePartition, string, error) {
+  resp := &instancepb.ListInstancePartitionsResponse{}
+  if pageToken != "" {
+   req.PageToken = pageToken
+  }
+  // Clamp the page size to the proto int32 field's range.
+  if pageSize > math.MaxInt32 {
+   req.PageSize = math.MaxInt32
+  } else if pageSize != 0 {
+   req.PageSize = int32(pageSize)
+  }
+  baseUrl, err := url.Parse(c.endpoint)
+  if err != nil {
+   return nil, "", err
+  }
+  baseUrl.Path += fmt.Sprintf("/v1/%v/instancePartitions", req.GetParent())
+
+  params := url.Values{}
+  params.Add("$alt", "json;enum-encoding=int")
+  if req.GetInstancePartitionDeadline() != nil {
+   instancePartitionDeadline, err := protojson.Marshal(req.GetInstancePartitionDeadline())
+   if err != nil {
+    return nil, "", err
+   }
+   // Strip protojson's surrounding quotes from the timestamp string.
+   params.Add("instancePartitionDeadline", string(instancePartitionDeadline[1:len(instancePartitionDeadline)-1]))
+  }
+  if req.GetPageSize() != 0 {
+   params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+  }
+  if req.GetPageToken() != "" {
+   params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+  }
+
+  baseUrl.RawQuery = params.Encode()
+
+  // Build HTTP headers from client and context metadata.
+  hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+  headers := gax.BuildHeaders(ctx, hds...)
+  e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+   if settings.Path != "" {
+    baseUrl.Path = settings.Path
+   }
+   httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+   if err != nil {
+    return err
+   }
+   httpReq.Header = headers
+
+   httpRsp, err := c.httpClient.Do(httpReq)
+   if err != nil {
+    return err
+   }
+   defer httpRsp.Body.Close()
+
+   if err = googleapi.CheckResponse(httpRsp); err != nil {
+    return err
+   }
+
+   buf, err := io.ReadAll(httpRsp.Body)
+   if err != nil {
+    return err
+   }
+
+   if err := unm.Unmarshal(buf, resp); err != nil {
+    return err
+   }
+
+   return nil
+  }, opts...)
+  if e != nil {
+   return nil, "", e
+  }
+  it.Response = resp
+  return resp.GetInstancePartitions(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+  items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+  if err != nil {
+   return "", err
+  }
+  it.items = append(it.items, items...)
+  return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// GetInstance gets information about a particular instance.
+//
+// REST transport: issues GET /v1/{name}; an optional field mask is passed as
+// the fieldMask query parameter.
+func (c *instanceAdminRESTClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+  return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFieldMask() != nil {
+  fieldMask, err := protojson.Marshal(req.GetFieldMask())
+  if err != nil {
+   return nil, err
+  }
+  // Strip protojson's surrounding quotes from the field-mask string.
+  params.Add("fieldMask", string(fieldMask[1:len(fieldMask)-1]))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ opts = append((*c.CallOptions).GetInstance[0:len((*c.CallOptions).GetInstance):len((*c.CallOptions).GetInstance)], opts...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &instancepb.Instance{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+  if settings.Path != "" {
+   baseUrl.Path = settings.Path
+  }
+  httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+  if err != nil {
+   return err
+  }
+  httpReq = httpReq.WithContext(ctx)
+  httpReq.Header = headers
+
+  httpRsp, err := c.httpClient.Do(httpReq)
+  if err != nil {
+   return err
+  }
+  defer httpRsp.Body.Close()
+
+  if err = googleapi.CheckResponse(httpRsp); err != nil {
+   return err
+  }
+
+  buf, err := io.ReadAll(httpRsp.Body)
+  if err != nil {
+   return err
+  }
+
+  if err := unm.Unmarshal(buf, resp); err != nil {
+   return err
+  }
+
+  return nil
+ }, opts...)
+ if e != nil {
+  return nil, e
+ }
+ return resp, nil
+}
+
// CreateInstance creates an instance and begins preparing it to begin serving. The
// returned [long-running operation][google.longrunning.Operation]
// can be used to track the progress of preparing the new
// instance. The instance name is assigned by the caller. If the
// named instance already exists, CreateInstance returns
// ALREADY_EXISTS.
//
// Immediately upon completion of this request:
//
// The instance is readable via the API, with all requested attributes
// but no allocated resources. Its state is CREATING.
//
// Until completion of the returned operation:
//
// Cancelling the operation renders the instance immediately unreadable
// via the API.
//
// The instance can be deleted.
//
// All other attempts to modify the instance are rejected.
//
// Upon completion of the returned operation:
//
// Billing for all successfully-allocated resources begins (some types
// may have lower than the requested levels).
//
// Databases can be created in the instance.
//
// The instance’s allocated resource levels are readable via the API.
//
// The instance’s state becomes READY.
//
// The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format <instance_name>/operations/<operation_id> and
// can be used to track creation of the instance. The
// metadata field type is
// CreateInstanceMetadata.
// The response field type is
// Instance, if successful.
func (c *instanceAdminRESTClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v/instances", req.GetParent())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// Polling for the returned operation must go through the REST path
	// derived from the operation name, so record it as a path override.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &CreateInstanceOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
// UpdateInstance updates an instance, and begins allocating or releasing resources
// as requested. The returned [long-running
// operation][google.longrunning.Operation] can be used to track the
// progress of updating the instance. If the named instance does not
// exist, returns NOT_FOUND.
//
// Immediately upon completion of this request:
//
// For resource types for which a decrease in the instance’s allocation
// has been requested, billing is based on the newly-requested level.
//
// Until completion of the returned operation:
//
// Cancelling the operation sets its metadata’s
// cancel_time,
// and begins restoring resources to their pre-request values. The
// operation is guaranteed to succeed at undoing all resource changes,
// after which point it terminates with a CANCELLED status.
//
// All other attempts to modify the instance are rejected.
//
// Reading the instance via the API continues to give the pre-request
// resource levels.
//
// Upon completion of the returned operation:
//
// Billing begins for all successfully-allocated resources (some types
// may have lower than the requested levels).
//
// All newly-reserved resources are available for serving the instance’s
// tables.
//
// The instance’s new resource levels are readable via the API.
//
// The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format <instance_name>/operations/<operation_id> and
// can be used to track the instance modification. The
// metadata field type is
// UpdateInstanceMetadata.
// The response field type is
// Instance, if successful.
//
// Authorization requires spanner.instances.update permission on
// the resource [name][google.spanner.admin.instance.v1.Instance.name (at http://google.spanner.admin.instance.v1.Instance.name)].
func (c *instanceAdminRESTClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetInstance().GetName())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance.name", url.QueryEscape(req.GetInstance().GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// Polling for the returned operation must go through the REST path
	// derived from the operation name, so record it as a path override.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &UpdateInstanceOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
+// DeleteInstance deletes an instance.
+//
+// Immediately upon completion of the request:
+//
+// Billing ceases for all of the instance’s reserved resources.
+//
+// Soon afterward:
+//
+// The instance and all of its databases immediately and
+// irrevocably disappear from the API. All data in the databases
+// is permanently deleted.
+func (c *instanceAdminRESTClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ // Returns nil if there is no error, otherwise wraps
+ // the response code and body into a non-nil error
+ return googleapi.CheckResponse(httpRsp)
+ }, opts...)
+}
+
// SetIamPolicy sets the access control policy on an instance resource. Replaces any
// existing policy.
//
// Authorization requires spanner.instances.setIamPolicy on
// resource.
func (c *instanceAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	// Merge client-level call options with per-call options; the full slice
	// expression keeps append from mutating the shared defaults.
	opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &iampb.Policy{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}
	return resp, nil
}
+
// GetIamPolicy gets the access control policy for an instance resource. Returns an empty
// policy if an instance exists but does not have a policy set.
//
// Authorization requires spanner.instances.getIamPolicy on
// resource.
func (c *instanceAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	// Merge client-level call options with per-call options; the full slice
	// expression keeps append from mutating the shared defaults.
	opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &iampb.Policy{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}
	return resp, nil
}
+
// TestIamPermissions returns permissions that the caller has on the specified instance resource.
//
// Attempting this RPC on a non-existent Cloud Spanner instance resource will
// result in a NOT_FOUND error if the user has spanner.instances.list
// permission on the containing Google Cloud Project. Otherwise returns an
// empty set of permissions.
func (c *instanceAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	// Merge client-level call options with per-call options; the full slice
	// expression keeps append from mutating the shared defaults.
	opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &iampb.TestIamPermissionsResponse{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}
	return resp, nil
}
+
// GetInstancePartition gets information about a particular instance partition.
func (c *instanceAdminRESTClient) GetInstancePartition(ctx context.Context, req *instancepb.GetInstancePartitionRequest, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	// Merge client-level call options with per-call options; the full slice
	// expression keeps append from mutating the shared defaults.
	opts = append((*c.CallOptions).GetInstancePartition[0:len((*c.CallOptions).GetInstancePartition):len((*c.CallOptions).GetInstancePartition)], opts...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &instancepb.InstancePartition{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}
	return resp, nil
}
+
// CreateInstancePartition creates an instance partition and begins preparing it to be used. The
// returned [long-running operation][google.longrunning.Operation]
// can be used to track the progress of preparing the new instance partition.
// The instance partition name is assigned by the caller. If the named
// instance partition already exists, CreateInstancePartition returns
// ALREADY_EXISTS.
//
// Immediately upon completion of this request:
//
// The instance partition is readable via the API, with all requested
// attributes but no allocated resources. Its state is CREATING.
//
// Until completion of the returned operation:
//
// Cancelling the operation renders the instance partition immediately
// unreadable via the API.
//
// The instance partition can be deleted.
//
// All other attempts to modify the instance partition are rejected.
//
// Upon completion of the returned operation:
//
// Billing for all successfully-allocated resources begins (some types
// may have lower than the requested levels).
//
// Databases can start using this instance partition.
//
// The instance partition’s allocated resource levels are readable via the
// API.
//
// The instance partition’s state becomes READY.
//
// The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format
// <instance_partition_name>/operations/<operation_id> and can be used to
// track creation of the instance partition. The
// metadata field type is
// CreateInstancePartitionMetadata.
// The response field type is
// InstancePartition, if
// successful.
func (c *instanceAdminRESTClient) CreateInstancePartition(ctx context.Context, req *instancepb.CreateInstancePartitionRequest, opts ...gax.CallOption) (*CreateInstancePartitionOperation, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v/instancePartitions", req.GetParent())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// Polling for the returned operation must go through the REST path
	// derived from the operation name, so record it as a path override.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &CreateInstancePartitionOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
+// DeleteInstancePartition deletes an existing instance partition. Requires that the
+// instance partition is not used by any database or backup and is not the
+// default instance partition of an instance.
+//
+// Authorization requires spanner.instancePartitions.delete permission on
+// the resource
+// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
+func (c *instanceAdminRESTClient) DeleteInstancePartition(ctx context.Context, req *instancepb.DeleteInstancePartitionRequest, opts ...gax.CallOption) error {
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetEtag() != "" {
+ params.Add("etag", fmt.Sprintf("%v", req.GetEtag()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ // Returns nil if there is no error, otherwise wraps
+ // the response code and body into a non-nil error
+ return googleapi.CheckResponse(httpRsp)
+ }, opts...)
+}
+
// UpdateInstancePartition updates an instance partition, and begins allocating or releasing resources
// as requested. The returned [long-running
// operation][google.longrunning.Operation] can be used to track the
// progress of updating the instance partition. If the named instance
// partition does not exist, returns NOT_FOUND.
//
// Immediately upon completion of this request:
//
// For resource types for which a decrease in the instance partition’s
// allocation has been requested, billing is based on the newly-requested
// level.
//
// Until completion of the returned operation:
//
// Cancelling the operation sets its metadata’s
// cancel_time,
// and begins restoring resources to their pre-request values. The
// operation is guaranteed to succeed at undoing all resource changes,
// after which point it terminates with a CANCELLED status.
//
// All other attempts to modify the instance partition are rejected.
//
// Reading the instance partition via the API continues to give the
// pre-request resource levels.
//
// Upon completion of the returned operation:
//
// Billing begins for all successfully-allocated resources (some types
// may have lower than the requested levels).
//
// All newly-reserved resources are available for serving the instance
// partition’s tables.
//
// The instance partition’s new resource levels are readable via the API.
//
// The returned [long-running operation][google.longrunning.Operation] will
// have a name of the format
// <instance_partition_name>/operations/<operation_id> and can be used to
// track the instance partition modification. The
// metadata field type is
// UpdateInstancePartitionMetadata.
// The response field type is
// InstancePartition, if
// successful.
//
// Authorization requires spanner.instancePartitions.update permission on
// the resource
// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
func (c *instanceAdminRESTClient) UpdateInstancePartition(ctx context.Context, req *instancepb.UpdateInstancePartitionRequest, opts ...gax.CallOption) (*UpdateInstancePartitionOperation, error) {
	// Serialize the request as the JSON body; enums are sent as numbers.
	m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
	jsonReq, err := m.Marshal(req)
	if err != nil {
		return nil, err
	}

	baseUrl, err := url.Parse(c.endpoint)
	if err != nil {
		return nil, err
	}
	baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetInstancePartition().GetName())

	params := url.Values{}
	params.Add("$alt", "json;enum-encoding=int")

	baseUrl.RawQuery = params.Encode()

	// Build HTTP headers from client and context metadata.
	hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_partition.name", url.QueryEscape(req.GetInstancePartition().GetName()))}

	hds = append(c.xGoogHeaders, hds...)
	hds = append(hds, "Content-Type", "application/json")
	headers := gax.BuildHeaders(ctx, hds...)
	unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
	resp := &longrunningpb.Operation{}
	e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		// Allow the call settings to override the request path.
		if settings.Path != "" {
			baseUrl.Path = settings.Path
		}
		httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
		if err != nil {
			return err
		}
		httpReq = httpReq.WithContext(ctx)
		httpReq.Header = headers

		httpRsp, err := c.httpClient.Do(httpReq)
		if err != nil {
			return err
		}
		defer httpRsp.Body.Close()

		// Non-2xx statuses become a *googleapi.Error.
		if err = googleapi.CheckResponse(httpRsp); err != nil {
			return err
		}

		buf, err := io.ReadAll(httpRsp.Body)
		if err != nil {
			return err
		}

		if err := unm.Unmarshal(buf, resp); err != nil {
			return err
		}

		return nil
	}, opts...)
	if e != nil {
		return nil, e
	}

	// Polling for the returned operation must go through the REST path
	// derived from the operation name, so record it as a path override.
	override := fmt.Sprintf("/v1/%s", resp.GetName())
	return &UpdateInstancePartitionOperation{
		lro:      longrunning.InternalNewOperation(*c.LROClient, resp),
		pollPath: override,
	}, nil
}
+
+// ListInstancePartitionOperations lists instance partition [long-running
+// operations][google.longrunning.Operation] in the given instance.
+// An instance partition operation has a name of the form
+// projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>.
+// The long-running operation
+// metadata field type
+// metadata.type_url describes the type of the metadata. Operations returned
+// include those that have completed/failed/canceled within the last 7 days,
+// and pending operations. Operations returned are ordered by
+// operation.metadata.value.start_time in descending order starting from the
+// most recently started operation.
+//
+// Authorization requires spanner.instancePartitionOperations.list
+// permission on the resource
+// parent.
+func (c *instanceAdminRESTClient) ListInstancePartitionOperations(ctx context.Context, req *instancepb.ListInstancePartitionOperationsRequest, opts ...gax.CallOption) *OperationIterator {
+ it := &OperationIterator{}
+ req = proto.Clone(req).(*instancepb.ListInstancePartitionOperationsRequest)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
+ resp := &instancepb.ListInstancePartitionOperationsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, "", err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v/instancePartitionOperations", req.GetParent())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+ if req.GetFilter() != "" {
+ params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
+ }
+ if req.GetInstancePartitionDeadline() != nil {
+ instancePartitionDeadline, err := protojson.Marshal(req.GetInstancePartitionDeadline())
+ if err != nil {
+ return nil, "", err
+ }
+ params.Add("instancePartitionDeadline", string(instancePartitionDeadline[1:len(instancePartitionDeadline)-1]))
+ }
+ if req.GetPageSize() != 0 {
+ params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
+ }
+ if req.GetPageToken() != "" {
+ params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
+ }
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := append(c.xGoogHeaders, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
+ if err != nil {
+ return err
+ }
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, "", e
+ }
+ it.Response = resp
+ return resp.GetOperations(), resp.GetNextPageToken(), nil
+ }
+
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+// MoveInstance moves an instance to the target instance configuration. You can use the
+// returned [long-running operation][google.longrunning.Operation] to track
+// the progress of moving the instance.
+//
+// MoveInstance returns FAILED_PRECONDITION if the instance meets any of
+// the following criteria:
+//
+// Is undergoing a move to a different instance configuration
+//
+// Has backups
+//
+// Has an ongoing update
+//
+// Contains any CMEK-enabled databases
+//
+// Is a free trial instance
+//
+// While the operation is pending:
+//
+// All other attempts to modify the instance, including changes to its
+// compute capacity, are rejected.
+//
+// The following database and backup admin operations are rejected:
+//
+// DatabaseAdmin.CreateDatabase
+//
+// DatabaseAdmin.UpdateDatabaseDdl (disabled if default_leader is
+// specified in the request.)
+//
+// DatabaseAdmin.RestoreDatabase
+//
+// DatabaseAdmin.CreateBackup
+//
+// DatabaseAdmin.CopyBackup
+//
+// Both the source and target instance configurations are subject to
+// hourly compute and storage charges.
+//
+// The instance might experience higher read-write latencies and a higher
+// transaction abort rate. However, moving an instance doesn’t cause any
+// downtime.
+//
+// The returned [long-running operation][google.longrunning.Operation] has
+// a name of the format
+// <instance_name>/operations/<operation_id> and can be used to track
+// the move instance operation. The
+// metadata field type is
+// MoveInstanceMetadata.
+// The response field type is
+// Instance,
+// if successful.
+// Cancelling the operation sets its metadata’s
+// cancel_time.
+// Cancellation is not immediate because it involves moving any data
+// previously moved to the target instance configuration back to the original
+// instance configuration. You can use this operation to track the progress of
+// the cancellation. Upon successful completion of the cancellation, the
+// operation terminates with CANCELLED status.
+//
+// If not cancelled, upon completion of the returned operation:
+//
+// The instance successfully moves to the target instance
+// configuration.
+//
+// You are billed for compute and storage in target instance
+// configuration.
+//
+// Authorization requires the spanner.instances.update permission on
+// the resource instance.
+//
+// For more details, see
+// Move an instance (at https://cloud.google.com/spanner/docs/move-instance).
+func (c *instanceAdminRESTClient) MoveInstance(ctx context.Context, req *instancepb.MoveInstanceRequest, opts ...gax.CallOption) (*MoveInstanceOperation, error) {
+ m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
+ jsonReq, err := m.Marshal(req)
+ if err != nil {
+ return nil, err
+ }
+
+ baseUrl, err := url.Parse(c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ baseUrl.Path += fmt.Sprintf("/v1/%v:move", req.GetName())
+
+ params := url.Values{}
+ params.Add("$alt", "json;enum-encoding=int")
+
+ baseUrl.RawQuery = params.Encode()
+
+ // Build HTTP headers from client and context metadata.
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ hds = append(hds, "Content-Type", "application/json")
+ headers := gax.BuildHeaders(ctx, hds...)
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
+ resp := &longrunningpb.Operation{}
+ e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ if settings.Path != "" {
+ baseUrl.Path = settings.Path
+ }
+ httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
+ if err != nil {
+ return err
+ }
+ httpReq = httpReq.WithContext(ctx)
+ httpReq.Header = headers
+
+ httpRsp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return err
+ }
+ defer httpRsp.Body.Close()
+
+ if err = googleapi.CheckResponse(httpRsp); err != nil {
+ return err
+ }
+
+ buf, err := io.ReadAll(httpRsp.Body)
+ if err != nil {
+ return err
+ }
+
+ if err := unm.Unmarshal(buf, resp); err != nil {
+ return err
+ }
+
+ return nil
+ }, opts...)
+ if e != nil {
+ return nil, e
+ }
+
+ override := fmt.Sprintf("/v1/%s", resp.GetName())
+ return &MoveInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, resp),
+ pollPath: override,
+ }, nil
+}
+
+// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
+// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
+ return &CreateInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
+// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &CreateInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
+
+// CreateInstanceConfigOperation returns a new CreateInstanceConfigOperation from a given name.
+// The name must be that of a previously created CreateInstanceConfigOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation {
+ return &CreateInstanceConfigOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// CreateInstanceConfigOperation returns a new CreateInstanceConfigOperation from a given name.
+// The name must be that of a previously created CreateInstanceConfigOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &CreateInstanceConfigOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
+
+// CreateInstancePartitionOperation returns a new CreateInstancePartitionOperation from a given name.
+// The name must be that of a previously created CreateInstancePartitionOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation {
+ return &CreateInstancePartitionOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// CreateInstancePartitionOperation returns a new CreateInstancePartitionOperation from a given name.
+// The name must be that of a previously created CreateInstancePartitionOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &CreateInstancePartitionOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
+
+// MoveInstanceOperation returns a new MoveInstanceOperation from a given name.
+// The name must be that of a previously created MoveInstanceOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) MoveInstanceOperation(name string) *MoveInstanceOperation {
+ return &MoveInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// MoveInstanceOperation returns a new MoveInstanceOperation from a given name.
+// The name must be that of a previously created MoveInstanceOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) MoveInstanceOperation(name string) *MoveInstanceOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &MoveInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
+
+// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
+// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
+ return &UpdateInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
+// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &UpdateInstanceOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
+
+// UpdateInstanceConfigOperation returns a new UpdateInstanceConfigOperation from a given name.
+// The name must be that of a previously created UpdateInstanceConfigOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation {
+ return &UpdateInstanceConfigOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// UpdateInstanceConfigOperation returns a new UpdateInstanceConfigOperation from a given name.
+// The name must be that of a previously created UpdateInstanceConfigOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &UpdateInstanceConfigOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
+
+// UpdateInstancePartitionOperation returns a new UpdateInstancePartitionOperation from a given name.
+// The name must be that of a previously created UpdateInstancePartitionOperation, possibly from a different process.
+func (c *instanceAdminGRPCClient) UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation {
+ return &UpdateInstancePartitionOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ }
+}
+
+// UpdateInstancePartitionOperation returns a new UpdateInstancePartitionOperation from a given name.
+// The name must be that of a previously created UpdateInstancePartitionOperation, possibly from a different process.
+func (c *instanceAdminRESTClient) UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation {
+ override := fmt.Sprintf("/v1/%s", name)
+ return &UpdateInstancePartitionOperation{
+ lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
+ pollPath: override,
+ }
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go
new file mode 100644
index 000000000..0e529b45c
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go
@@ -0,0 +1,279 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
+// source: google/spanner/admin/instance/v1/common.proto
+
+package instancepb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates the expected fulfillment period of an operation.
+type FulfillmentPeriod int32
+
+const (
+ // Not specified.
+ FulfillmentPeriod_FULFILLMENT_PERIOD_UNSPECIFIED FulfillmentPeriod = 0
+ // Normal fulfillment period. The operation is expected to complete within
+ // minutes.
+ FulfillmentPeriod_FULFILLMENT_PERIOD_NORMAL FulfillmentPeriod = 1
+ // Extended fulfillment period. It can take up to an hour for the operation
+ // to complete.
+ FulfillmentPeriod_FULFILLMENT_PERIOD_EXTENDED FulfillmentPeriod = 2
+)
+
+// Enum value maps for FulfillmentPeriod.
+var (
+ FulfillmentPeriod_name = map[int32]string{
+ 0: "FULFILLMENT_PERIOD_UNSPECIFIED",
+ 1: "FULFILLMENT_PERIOD_NORMAL",
+ 2: "FULFILLMENT_PERIOD_EXTENDED",
+ }
+ FulfillmentPeriod_value = map[string]int32{
+ "FULFILLMENT_PERIOD_UNSPECIFIED": 0,
+ "FULFILLMENT_PERIOD_NORMAL": 1,
+ "FULFILLMENT_PERIOD_EXTENDED": 2,
+ }
+)
+
+func (x FulfillmentPeriod) Enum() *FulfillmentPeriod {
+ p := new(FulfillmentPeriod)
+ *p = x
+ return p
+}
+
+func (x FulfillmentPeriod) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FulfillmentPeriod) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_common_proto_enumTypes[0].Descriptor()
+}
+
+func (FulfillmentPeriod) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_common_proto_enumTypes[0]
+}
+
+func (x FulfillmentPeriod) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FulfillmentPeriod.Descriptor instead.
+func (FulfillmentPeriod) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+// Encapsulates progress related information for a Cloud Spanner long
+// running instance operations.
+type OperationProgress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Percent completion of the operation.
+ // Values are between 0 and 100 inclusive.
+ ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
+ // Time the request was received.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // If set, the time at which this operation failed or was completed
+ // successfully.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+}
+
+func (x *OperationProgress) Reset() {
+ *x = OperationProgress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OperationProgress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OperationProgress) ProtoMessage() {}
+
+func (x *OperationProgress) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_common_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OperationProgress.ProtoReflect.Descriptor instead.
+func (*OperationProgress) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OperationProgress) GetProgressPercent() int32 {
+ if x != nil {
+ return x.ProgressPercent
+ }
+ return 0
+}
+
+func (x *OperationProgress) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *OperationProgress) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+var File_google_spanner_admin_instance_v1_common_proto protoreflect.FileDescriptor
+
+var file_google_spanner_admin_instance_v1_common_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0xb0, 0x01, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, 0x65, 0x72, 0x63,
+ 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35,
+ 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e,
+ 0x64, 0x54, 0x69, 0x6d, 0x65, 0x2a, 0x77, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c,
+ 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x46, 0x55,
+ 0x4c, 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45, 0x52, 0x49, 0x4f, 0x44,
+ 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d,
+ 0x0a, 0x19, 0x46, 0x55, 0x4c, 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45,
+ 0x52, 0x49, 0x4f, 0x44, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1f, 0x0a,
+ 0x1b, 0x46, 0x55, 0x4c, 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45, 0x52,
+ 0x49, 0x4f, 0x44, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x42, 0xfd,
+ 0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x70, 0x62, 0x3b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x70, 0x62, 0xaa, 0x02,
+ 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x49, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41,
+ 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5c, 0x56, 0x31,
+ 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+ 0x3a, 0x3a, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_spanner_admin_instance_v1_common_proto_rawDescOnce sync.Once
+ file_google_spanner_admin_instance_v1_common_proto_rawDescData = file_google_spanner_admin_instance_v1_common_proto_rawDesc
+)
+
+func file_google_spanner_admin_instance_v1_common_proto_rawDescGZIP() []byte {
+ file_google_spanner_admin_instance_v1_common_proto_rawDescOnce.Do(func() {
+ file_google_spanner_admin_instance_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_instance_v1_common_proto_rawDescData)
+ })
+ return file_google_spanner_admin_instance_v1_common_proto_rawDescData
+}
+
+var file_google_spanner_admin_instance_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_spanner_admin_instance_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_spanner_admin_instance_v1_common_proto_goTypes = []any{
+ (FulfillmentPeriod)(0), // 0: google.spanner.admin.instance.v1.FulfillmentPeriod
+ (*OperationProgress)(nil), // 1: google.spanner.admin.instance.v1.OperationProgress
+ (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
+}
+var file_google_spanner_admin_instance_v1_common_proto_depIdxs = []int32{
+ 2, // 0: google.spanner.admin.instance.v1.OperationProgress.start_time:type_name -> google.protobuf.Timestamp
+ 2, // 1: google.spanner.admin.instance.v1.OperationProgress.end_time:type_name -> google.protobuf.Timestamp
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_spanner_admin_instance_v1_common_proto_init() }
+func file_google_spanner_admin_instance_v1_common_proto_init() {
+ if File_google_spanner_admin_instance_v1_common_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_spanner_admin_instance_v1_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
+ switch v := v.(*OperationProgress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_spanner_admin_instance_v1_common_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_spanner_admin_instance_v1_common_proto_goTypes,
+ DependencyIndexes: file_google_spanner_admin_instance_v1_common_proto_depIdxs,
+ EnumInfos: file_google_spanner_admin_instance_v1_common_proto_enumTypes,
+ MessageInfos: file_google_spanner_admin_instance_v1_common_proto_msgTypes,
+ }.Build()
+ File_google_spanner_admin_instance_v1_common_proto = out.File
+ file_google_spanner_admin_instance_v1_common_proto_rawDesc = nil
+ file_google_spanner_admin_instance_v1_common_proto_goTypes = nil
+ file_google_spanner_admin_instance_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go
new file mode 100644
index 000000000..10b80281b
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go
@@ -0,0 +1,6928 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.34.2
+// protoc v4.25.3
+// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
+
+package instancepb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ iampb "cloud.google.com/go/iam/apiv1/iampb"
+ longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates the type of replica. See the [replica types
+// documentation](https://cloud.google.com/spanner/docs/replication#replica_types)
+// for more details.
+type ReplicaInfo_ReplicaType int32
+
+const (
+ // Not specified.
+ ReplicaInfo_TYPE_UNSPECIFIED ReplicaInfo_ReplicaType = 0
+ // Read-write replicas support both reads and writes. These replicas:
+ //
+ // * Maintain a full copy of your data.
+ // * Serve reads.
+ // * Can vote whether to commit a write.
+ // * Participate in leadership election.
+ // * Are eligible to become a leader.
+ ReplicaInfo_READ_WRITE ReplicaInfo_ReplicaType = 1
+ // Read-only replicas only support reads (not writes). Read-only replicas:
+ //
+ // * Maintain a full copy of your data.
+ // * Serve reads.
+ // * Do not participate in voting to commit writes.
+ // * Are not eligible to become a leader.
+ ReplicaInfo_READ_ONLY ReplicaInfo_ReplicaType = 2
+ // Witness replicas don't support reads but do participate in voting to
+ // commit writes. Witness replicas:
+ //
+ // * Do not maintain a full copy of data.
+ // * Do not serve reads.
+ // * Vote whether to commit writes.
+ // * Participate in leader election but are not eligible to become leader.
+ ReplicaInfo_WITNESS ReplicaInfo_ReplicaType = 3
+)
+
+// Enum value maps for ReplicaInfo_ReplicaType.
+var (
+ ReplicaInfo_ReplicaType_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "READ_WRITE",
+ 2: "READ_ONLY",
+ 3: "WITNESS",
+ }
+ ReplicaInfo_ReplicaType_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "READ_WRITE": 1,
+ "READ_ONLY": 2,
+ "WITNESS": 3,
+ }
+)
+
+func (x ReplicaInfo_ReplicaType) Enum() *ReplicaInfo_ReplicaType {
+ p := new(ReplicaInfo_ReplicaType)
+ *p = x
+ return p
+}
+
+func (x ReplicaInfo_ReplicaType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ReplicaInfo_ReplicaType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[0].Descriptor()
+}
+
+func (ReplicaInfo_ReplicaType) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[0]
+}
+
+func (x ReplicaInfo_ReplicaType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ReplicaInfo_ReplicaType.Descriptor instead.
+func (ReplicaInfo_ReplicaType) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// The type of this configuration.
+type InstanceConfig_Type int32
+
+const (
+ // Unspecified.
+ InstanceConfig_TYPE_UNSPECIFIED InstanceConfig_Type = 0
+ // Google managed configuration.
+ InstanceConfig_GOOGLE_MANAGED InstanceConfig_Type = 1
+ // User managed configuration.
+ InstanceConfig_USER_MANAGED InstanceConfig_Type = 2
+)
+
+// Enum value maps for InstanceConfig_Type.
+var (
+ InstanceConfig_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "GOOGLE_MANAGED",
+ 2: "USER_MANAGED",
+ }
+ InstanceConfig_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "GOOGLE_MANAGED": 1,
+ "USER_MANAGED": 2,
+ }
+)
+
+func (x InstanceConfig_Type) Enum() *InstanceConfig_Type {
+ p := new(InstanceConfig_Type)
+ *p = x
+ return p
+}
+
+func (x InstanceConfig_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (InstanceConfig_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[1].Descriptor()
+}
+
+func (InstanceConfig_Type) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[1]
+}
+
+func (x InstanceConfig_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use InstanceConfig_Type.Descriptor instead.
+func (InstanceConfig_Type) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// Indicates the current state of the instance configuration.
+type InstanceConfig_State int32
+
+const (
+ // Not specified.
+ InstanceConfig_STATE_UNSPECIFIED InstanceConfig_State = 0
+ // The instance configuration is still being created.
+ InstanceConfig_CREATING InstanceConfig_State = 1
+ // The instance configuration is fully created and ready to be used to
+ // create instances.
+ InstanceConfig_READY InstanceConfig_State = 2
+)
+
+// Enum value maps for InstanceConfig_State.
+var (
+ InstanceConfig_State_name = map[int32]string{
+ 0: "STATE_UNSPECIFIED",
+ 1: "CREATING",
+ 2: "READY",
+ }
+ InstanceConfig_State_value = map[string]int32{
+ "STATE_UNSPECIFIED": 0,
+ "CREATING": 1,
+ "READY": 2,
+ }
+)
+
+func (x InstanceConfig_State) Enum() *InstanceConfig_State {
+ p := new(InstanceConfig_State)
+ *p = x
+ return p
+}
+
+func (x InstanceConfig_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (InstanceConfig_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[2].Descriptor()
+}
+
+func (InstanceConfig_State) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[2]
+}
+
+func (x InstanceConfig_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use InstanceConfig_State.Descriptor instead.
+func (InstanceConfig_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 1}
+}
+
+// Indicates the current state of the instance.
+type Instance_State int32
+
+const (
+ // Not specified.
+ Instance_STATE_UNSPECIFIED Instance_State = 0
+ // The instance is still being created. Resources may not be
+ // available yet, and operations such as database creation may not
+ // work.
+ Instance_CREATING Instance_State = 1
+ // The instance is fully created and ready to do work such as
+ // creating databases.
+ Instance_READY Instance_State = 2
+)
+
+// Enum value maps for Instance_State.
+var (
+ Instance_State_name = map[int32]string{
+ 0: "STATE_UNSPECIFIED",
+ 1: "CREATING",
+ 2: "READY",
+ }
+ Instance_State_value = map[string]int32{
+ "STATE_UNSPECIFIED": 0,
+ "CREATING": 1,
+ "READY": 2,
+ }
+)
+
+func (x Instance_State) Enum() *Instance_State {
+ p := new(Instance_State)
+ *p = x
+ return p
+}
+
+func (x Instance_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Instance_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[3].Descriptor()
+}
+
+func (Instance_State) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[3]
+}
+
+func (x Instance_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Instance_State.Descriptor instead.
+func (Instance_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// The edition selected for this instance. Different editions provide
+// different capabilities at different price points.
+type Instance_Edition int32
+
+const (
+ // Edition not specified.
+ Instance_EDITION_UNSPECIFIED Instance_Edition = 0
+ // Standard edition.
+ Instance_STANDARD Instance_Edition = 1
+ // Enterprise edition.
+ Instance_ENTERPRISE Instance_Edition = 2
+ // Enterprise Plus edition.
+ Instance_ENTERPRISE_PLUS Instance_Edition = 3
+)
+
+// Enum value maps for Instance_Edition.
+var (
+ Instance_Edition_name = map[int32]string{
+ 0: "EDITION_UNSPECIFIED",
+ 1: "STANDARD",
+ 2: "ENTERPRISE",
+ 3: "ENTERPRISE_PLUS",
+ }
+ Instance_Edition_value = map[string]int32{
+ "EDITION_UNSPECIFIED": 0,
+ "STANDARD": 1,
+ "ENTERPRISE": 2,
+ "ENTERPRISE_PLUS": 3,
+ }
+)
+
+func (x Instance_Edition) Enum() *Instance_Edition {
+ p := new(Instance_Edition)
+ *p = x
+ return p
+}
+
+func (x Instance_Edition) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Instance_Edition) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[4].Descriptor()
+}
+
+func (Instance_Edition) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[4]
+}
+
+func (x Instance_Edition) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Instance_Edition.Descriptor instead.
+func (Instance_Edition) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{3, 1}
+}
+
+// Indicates the current state of the instance partition.
+type InstancePartition_State int32
+
+const (
+ // Not specified.
+ InstancePartition_STATE_UNSPECIFIED InstancePartition_State = 0
+ // The instance partition is still being created. Resources may not be
+ // available yet, and operations such as creating placements using this
+ // instance partition may not work.
+ InstancePartition_CREATING InstancePartition_State = 1
+ // The instance partition is fully created and ready to do work such as
+ // creating placements and using in databases.
+ InstancePartition_READY InstancePartition_State = 2
+)
+
+// Enum value maps for InstancePartition_State.
+var (
+ InstancePartition_State_name = map[int32]string{
+ 0: "STATE_UNSPECIFIED",
+ 1: "CREATING",
+ 2: "READY",
+ }
+ InstancePartition_State_value = map[string]int32{
+ "STATE_UNSPECIFIED": 0,
+ "CREATING": 1,
+ "READY": 2,
+ }
+)
+
+func (x InstancePartition_State) Enum() *InstancePartition_State {
+ p := new(InstancePartition_State)
+ *p = x
+ return p
+}
+
+func (x InstancePartition_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (InstancePartition_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[5].Descriptor()
+}
+
+func (InstancePartition_State) Type() protoreflect.EnumType {
+ return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[5]
+}
+
+func (x InstancePartition_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use InstancePartition_State.Descriptor instead.
+func (InstancePartition_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{22, 0}
+}
+
+type ReplicaInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The location of the serving resources, e.g. "us-central1".
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The type of replica.
+ Type ReplicaInfo_ReplicaType `protobuf:"varint,2,opt,name=type,proto3,enum=google.spanner.admin.instance.v1.ReplicaInfo_ReplicaType" json:"type,omitempty"`
+ // If true, this location is designated as the default leader location where
+ // leader replicas are placed. See the [region types
+ // documentation](https://cloud.google.com/spanner/docs/instances#region_types)
+ // for more details.
+ DefaultLeaderLocation bool `protobuf:"varint,3,opt,name=default_leader_location,json=defaultLeaderLocation,proto3" json:"default_leader_location,omitempty"`
+}
+
+func (x *ReplicaInfo) Reset() {
+ *x = ReplicaInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ReplicaInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ReplicaInfo) ProtoMessage() {}
+
+func (x *ReplicaInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ReplicaInfo.ProtoReflect.Descriptor instead.
+func (*ReplicaInfo) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ReplicaInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *ReplicaInfo) GetType() ReplicaInfo_ReplicaType {
+ if x != nil {
+ return x.Type
+ }
+ return ReplicaInfo_TYPE_UNSPECIFIED
+}
+
+func (x *ReplicaInfo) GetDefaultLeaderLocation() bool {
+ if x != nil {
+ return x.DefaultLeaderLocation
+ }
+ return false
+}
+
+// A possible configuration for a Cloud Spanner instance. Configurations
+// define the geographic placement of nodes and their replication.
+type InstanceConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier for the instance configuration. Values
+ // are of the form
+ // `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
+ //
+ // User instance configuration must start with `custom-`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The name of this instance configuration as it appears in UIs.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Output only. Whether this instance configuration is a Google-managed or
+ // user-managed configuration.
+ ConfigType InstanceConfig_Type `protobuf:"varint,5,opt,name=config_type,json=configType,proto3,enum=google.spanner.admin.instance.v1.InstanceConfig_Type" json:"config_type,omitempty"`
+ // The geographic placement of nodes in this instance configuration and their
+ // replication properties.
+ Replicas []*ReplicaInfo `protobuf:"bytes,3,rep,name=replicas,proto3" json:"replicas,omitempty"`
+ // Output only. The available optional replicas to choose from for user
+ // managed configurations. Populated for Google managed configurations.
+ OptionalReplicas []*ReplicaInfo `protobuf:"bytes,6,rep,name=optional_replicas,json=optionalReplicas,proto3" json:"optional_replicas,omitempty"`
+ // Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
+ // based on which this configuration is created. Only set for user managed
+ // configurations. `base_config` must refer to a configuration of type
+ // GOOGLE_MANAGED in the same project as this configuration.
+ BaseConfig string `protobuf:"bytes,7,opt,name=base_config,json=baseConfig,proto3" json:"base_config,omitempty"`
+ // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
+ // resources into groups that reflect a customer's organizational needs and
+ // deployment strategies. Cloud Labels can be used to filter collections of
+ // resources. They can be used to control how resource metrics are aggregated.
+ // And they can be used as arguments to policy management rules (e.g. route,
+ // firewall, load balancing, etc.).
+ //
+ // - Label keys must be between 1 and 63 characters long and must conform to
+ // the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
+ // - Label values must be between 0 and 63 characters long and must conform
+ // to the regular expression `[a-z0-9_-]{0,63}`.
+ // - No more than 64 labels can be associated with a given resource.
+ //
+ // See https://goo.gl/xmQnxf for more information on and examples of labels.
+ //
+ // If you plan to use labels in your own code, please note that additional
+ // characters may be allowed in the future. Therefore, you are advised to use
+ // an internal label representation, such as JSON, which doesn't rely upon
+ // specific characters being disallowed. For example, representing labels
+ // as the string: name + "_" + value would prove problematic if we were to
+ // allow "_" in a future release.
+ Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // etag is used for optimistic concurrency control as a way
+ // to help prevent simultaneous updates of a instance configuration from
+ // overwriting each other. It is strongly suggested that systems make use of
+ // the etag in the read-modify-write cycle to perform instance configuration
+ // updates in order to avoid race conditions: An etag is returned in the
+ // response which contains instance configurations, and systems are expected
+ // to put that etag in the request to update instance configuration to ensure
+ // that their change is applied to the same version of the instance
+ // configuration. If no etag is provided in the call to update the instance
+ // configuration, then the existing instance configuration is overwritten
+ // blindly.
+ Etag string `protobuf:"bytes,9,opt,name=etag,proto3" json:"etag,omitempty"`
+ // Allowed values of the "default_leader" schema option for databases in
+ // instances that use this instance configuration.
+ LeaderOptions []string `protobuf:"bytes,4,rep,name=leader_options,json=leaderOptions,proto3" json:"leader_options,omitempty"`
+ // Output only. If true, the instance configuration is being created or
+ // updated. If false, there are no ongoing operations for the instance
+ // configuration.
+ Reconciling bool `protobuf:"varint,10,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
+ // Output only. The current instance configuration state. Applicable only for
+ // `USER_MANAGED` configurations.
+ State InstanceConfig_State `protobuf:"varint,11,opt,name=state,proto3,enum=google.spanner.admin.instance.v1.InstanceConfig_State" json:"state,omitempty"`
+}
+
+func (x *InstanceConfig) Reset() {
+ *x = InstanceConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InstanceConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InstanceConfig) ProtoMessage() {}
+
+func (x *InstanceConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InstanceConfig.ProtoReflect.Descriptor instead.
+func (*InstanceConfig) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *InstanceConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *InstanceConfig) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *InstanceConfig) GetConfigType() InstanceConfig_Type {
+ if x != nil {
+ return x.ConfigType
+ }
+ return InstanceConfig_TYPE_UNSPECIFIED
+}
+
+func (x *InstanceConfig) GetReplicas() []*ReplicaInfo {
+ if x != nil {
+ return x.Replicas
+ }
+ return nil
+}
+
+func (x *InstanceConfig) GetOptionalReplicas() []*ReplicaInfo {
+ if x != nil {
+ return x.OptionalReplicas
+ }
+ return nil
+}
+
+func (x *InstanceConfig) GetBaseConfig() string {
+ if x != nil {
+ return x.BaseConfig
+ }
+ return ""
+}
+
+func (x *InstanceConfig) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *InstanceConfig) GetEtag() string {
+ if x != nil {
+ return x.Etag
+ }
+ return ""
+}
+
+func (x *InstanceConfig) GetLeaderOptions() []string {
+ if x != nil {
+ return x.LeaderOptions
+ }
+ return nil
+}
+
+func (x *InstanceConfig) GetReconciling() bool {
+ if x != nil {
+ return x.Reconciling
+ }
+ return false
+}
+
+func (x *InstanceConfig) GetState() InstanceConfig_State {
+ if x != nil {
+ return x.State
+ }
+ return InstanceConfig_STATE_UNSPECIFIED
+}
+
+// Autoscaling configuration for an instance.
+type AutoscalingConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Autoscaling limits for an instance.
+ AutoscalingLimits *AutoscalingConfig_AutoscalingLimits `protobuf:"bytes,1,opt,name=autoscaling_limits,json=autoscalingLimits,proto3" json:"autoscaling_limits,omitempty"`
+ // Required. The autoscaling targets for an instance.
+ AutoscalingTargets *AutoscalingConfig_AutoscalingTargets `protobuf:"bytes,2,opt,name=autoscaling_targets,json=autoscalingTargets,proto3" json:"autoscaling_targets,omitempty"`
+}
+
+func (x *AutoscalingConfig) Reset() {
+ *x = AutoscalingConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AutoscalingConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AutoscalingConfig) ProtoMessage() {}
+
+func (x *AutoscalingConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AutoscalingConfig.ProtoReflect.Descriptor instead.
+func (*AutoscalingConfig) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *AutoscalingConfig) GetAutoscalingLimits() *AutoscalingConfig_AutoscalingLimits {
+ if x != nil {
+ return x.AutoscalingLimits
+ }
+ return nil
+}
+
+func (x *AutoscalingConfig) GetAutoscalingTargets() *AutoscalingConfig_AutoscalingTargets {
+ if x != nil {
+ return x.AutoscalingTargets
+ }
+ return nil
+}
+
+// An isolated set of Cloud Spanner resources on which databases can be hosted.
+type Instance struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A unique identifier for the instance, which cannot be changed
+ // after the instance is created. Values are of the form
+ // `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
+ // segment of the name must be between 2 and 64 characters in length.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The name of the instance's configuration. Values are of the form
+ // `projects/<project>/instanceConfigs/<configuration>`. See
+ // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ Config string `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+ // Required. The descriptive name for this instance as it appears in UIs.
+ // Must be unique per project and between 4 and 30 characters in length.
+ DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The number of nodes allocated to this instance. At most one of either
+ // node_count or processing_units should be present in the message.
+ //
+ // Users can set the node_count field to specify the target number of nodes
+ // allocated to the instance.
+ //
+ // This may be zero in API responses for instances that are not yet in state
+ // `READY`.
+ //
+ // See [the
+ // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
+ // for more information about nodes and processing units.
+ NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3" json:"node_count,omitempty"`
+ // The number of processing units allocated to this instance. At most one of
+ // processing_units or node_count should be present in the message.
+ //
+ // Users can set the processing_units field to specify the target number of
+ // processing units allocated to the instance.
+ //
+ // This may be zero in API responses for instances that are not yet in state
+ // `READY`.
+ //
+ // See [the
+ // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
+ // for more information about nodes and processing units.
+ ProcessingUnits int32 `protobuf:"varint,9,opt,name=processing_units,json=processingUnits,proto3" json:"processing_units,omitempty"`
+ // Optional. The autoscaling configuration. Autoscaling is enabled if this
+ // field is set. When autoscaling is enabled, node_count and processing_units
+ // are treated as OUTPUT_ONLY fields and reflect the current compute capacity
+ // allocated to the instance.
+ AutoscalingConfig *AutoscalingConfig `protobuf:"bytes,17,opt,name=autoscaling_config,json=autoscalingConfig,proto3" json:"autoscaling_config,omitempty"`
+ // Output only. The current instance state. For
+ // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
+ // the state must be either omitted or set to `CREATING`. For
+ // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
+ // the state must be either omitted or set to `READY`.
+ State Instance_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.spanner.admin.instance.v1.Instance_State" json:"state,omitempty"`
+ // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
+ // resources into groups that reflect a customer's organizational needs and
+ // deployment strategies. Cloud Labels can be used to filter collections of
+ // resources. They can be used to control how resource metrics are aggregated.
+ // And they can be used as arguments to policy management rules (e.g. route,
+ // firewall, load balancing, etc.).
+ //
+ // - Label keys must be between 1 and 63 characters long and must conform to
+ // the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
+ // - Label values must be between 0 and 63 characters long and must conform
+ // to the regular expression `[a-z0-9_-]{0,63}`.
+ // - No more than 64 labels can be associated with a given resource.
+ //
+ // See https://goo.gl/xmQnxf for more information on and examples of labels.
+ //
+ // If you plan to use labels in your own code, please note that additional
+ // characters may be allowed in the future. And so you are advised to use an
+ // internal label representation, such as JSON, which doesn't rely upon
+ // specific characters being disallowed. For example, representing labels
+ // as the string: name + "_" + value would prove problematic if we were to
+ // allow "_" in a future release.
+ Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Deprecated. This field is not populated.
+ EndpointUris []string `protobuf:"bytes,8,rep,name=endpoint_uris,json=endpointUris,proto3" json:"endpoint_uris,omitempty"`
+ // Output only. The time at which the instance was created.
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ // Output only. The time at which the instance was most recently updated.
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ // Optional. The `Edition` of the current instance.
+ Edition Instance_Edition `protobuf:"varint,20,opt,name=edition,proto3,enum=google.spanner.admin.instance.v1.Instance_Edition" json:"edition,omitempty"`
+}
+
+func (x *Instance) Reset() {
+ *x = Instance{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Instance) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Instance) ProtoMessage() {}
+
+func (x *Instance) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Instance.ProtoReflect.Descriptor instead.
+func (*Instance) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Instance) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Instance) GetConfig() string {
+ if x != nil {
+ return x.Config
+ }
+ return ""
+}
+
+func (x *Instance) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *Instance) GetNodeCount() int32 {
+ if x != nil {
+ return x.NodeCount
+ }
+ return 0
+}
+
+func (x *Instance) GetProcessingUnits() int32 {
+ if x != nil {
+ return x.ProcessingUnits
+ }
+ return 0
+}
+
+func (x *Instance) GetAutoscalingConfig() *AutoscalingConfig {
+ if x != nil {
+ return x.AutoscalingConfig
+ }
+ return nil
+}
+
+func (x *Instance) GetState() Instance_State {
+ if x != nil {
+ return x.State
+ }
+ return Instance_STATE_UNSPECIFIED
+}
+
+func (x *Instance) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *Instance) GetEndpointUris() []string {
+ if x != nil {
+ return x.EndpointUris
+ }
+ return nil
+}
+
+func (x *Instance) GetCreateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreateTime
+ }
+ return nil
+}
+
+func (x *Instance) GetUpdateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.UpdateTime
+ }
+ return nil
+}
+
+func (x *Instance) GetEdition() Instance_Edition {
+ if x != nil {
+ return x.Edition
+ }
+ return Instance_EDITION_UNSPECIFIED
+}
+
+// The request for
+// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+type ListInstanceConfigsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the project for which a list of supported instance
+ // configurations is requested. Values are of the form
+ // `projects/<project>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Number of instance configurations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
+ // from a previous
+ // [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListInstanceConfigsRequest) Reset() {
+ *x = ListInstanceConfigsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstanceConfigsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstanceConfigsRequest) ProtoMessage() {}
+
+func (x *ListInstanceConfigsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstanceConfigsRequest.ProtoReflect.Descriptor instead.
+func (*ListInstanceConfigsRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListInstanceConfigsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListInstanceConfigsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListInstanceConfigsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The response for
+// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+type ListInstanceConfigsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of requested instance configurations.
+ InstanceConfigs []*InstanceConfig `protobuf:"bytes,1,rep,name=instance_configs,json=instanceConfigs,proto3" json:"instance_configs,omitempty"`
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
+ // call to fetch more of the matching instance configurations.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListInstanceConfigsResponse) Reset() {
+ *x = ListInstanceConfigsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstanceConfigsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstanceConfigsResponse) ProtoMessage() {}
+
+func (x *ListInstanceConfigsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstanceConfigsResponse.ProtoReflect.Descriptor instead.
+func (*ListInstanceConfigsResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListInstanceConfigsResponse) GetInstanceConfigs() []*InstanceConfig {
+ if x != nil {
+ return x.InstanceConfigs
+ }
+ return nil
+}
+
+func (x *ListInstanceConfigsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The request for
+// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
+type GetInstanceConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the requested instance configuration. Values are of
+ // the form `projects/<project>/instanceConfigs/<config>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetInstanceConfigRequest) Reset() {
+ *x = GetInstanceConfigRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetInstanceConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetInstanceConfigRequest) ProtoMessage() {}
+
+func (x *GetInstanceConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetInstanceConfigRequest.ProtoReflect.Descriptor instead.
+func (*GetInstanceConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *GetInstanceConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest].
+type CreateInstanceConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the project in which to create the instance
+ // configuration. Values are of the form `projects/<project>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The ID of the instance configuration to create. Valid identifiers
+ // are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
+ // characters in length. The `custom-` prefix is required to avoid name
+ // conflicts with Google-managed configurations.
+ InstanceConfigId string `protobuf:"bytes,2,opt,name=instance_config_id,json=instanceConfigId,proto3" json:"instance_config_id,omitempty"`
+ // Required. The InstanceConfig proto of the configuration to create.
+ // instance_config.name must be
+ // `<parent>/instanceConfigs/<instance_config_id>`.
+ // instance_config.base_config must be a Google managed configuration name,
+ // e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
+ InstanceConfig *InstanceConfig `protobuf:"bytes,3,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
+ // An option to validate, but not actually execute, a request,
+ // and provide the same response.
+ ValidateOnly bool `protobuf:"varint,4,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *CreateInstanceConfigRequest) Reset() {
+ *x = CreateInstanceConfigRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateInstanceConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateInstanceConfigRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *CreateInstanceConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateInstanceConfigRequest.ProtoReflect.Descriptor instead.
+func (*CreateInstanceConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{7}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *CreateInstanceConfigRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateInstanceConfigRequest) GetInstanceConfigId() string {
+ if x != nil {
+ return x.InstanceConfigId
+ }
+ return ""
+}
+
+func (x *CreateInstanceConfigRequest) GetInstanceConfig() *InstanceConfig {
+ if x != nil {
+ return x.InstanceConfig
+ }
+ return nil
+}
+
+func (x *CreateInstanceConfigRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest].
+type UpdateInstanceConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The user instance configuration to update, which must always
+ // include the instance configuration name. Otherwise, only fields mentioned
+ // in
+ // [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
+ // need be included. To prevent conflicts of concurrent updates,
+ // [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
+ // be used.
+ InstanceConfig *InstanceConfig `protobuf:"bytes,1,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
+ // Required. A mask specifying which fields in
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
+ // updated. The field mask must always be specified; this prevents any future
+ // fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
+ // from being erased accidentally by clients that do not know about them. Only
+ // display_name and labels can be updated.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // An option to validate, but not actually execute, a request,
+ // and provide the same response.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *UpdateInstanceConfigRequest) Reset() {
+ *x = UpdateInstanceConfigRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateInstanceConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateInstanceConfigRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *UpdateInstanceConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateInstanceConfigRequest.ProtoReflect.Descriptor instead.
+func (*UpdateInstanceConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{8}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *UpdateInstanceConfigRequest) GetInstanceConfig() *InstanceConfig {
+ if x != nil {
+ return x.InstanceConfig
+ }
+ return nil
+}
+
+func (x *UpdateInstanceConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateInstanceConfigRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [DeleteInstanceConfigRequest][InstanceAdmin.DeleteInstanceConfigRequest].
+type DeleteInstanceConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the instance configuration to be deleted.
+ // Values are of the form
+ // `projects/<project>/instanceConfigs/<instance_config>`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Used for optimistic concurrency control as a way to help prevent
+ // simultaneous deletes of an instance configuration from overwriting each
+ // other. If not empty, the API
+ // only deletes the instance configuration when the etag provided matches the
+ // current status of the requested instance configuration. Otherwise, deletes
+ // the instance configuration without checking the current status of the
+ // requested instance configuration.
+ Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"`
+ // An option to validate, but not actually execute, a request,
+ // and provide the same response.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *DeleteInstanceConfigRequest) Reset() {
+ *x = DeleteInstanceConfigRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteInstanceConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteInstanceConfigRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *DeleteInstanceConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteInstanceConfigRequest.ProtoReflect.Descriptor instead.
+func (*DeleteInstanceConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{9}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *DeleteInstanceConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteInstanceConfigRequest) GetEtag() string {
+ if x != nil {
+ return x.Etag
+ }
+ return ""
+}
+
+func (x *DeleteInstanceConfigRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
+type ListInstanceConfigOperationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The project of the instance configuration operations.
+ // Values are of the form `projects/<project>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // An expression that filters the list of returned operations.
+ //
+ // A filter expression consists of a field name, a
+ // comparison operator, and a value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the [Operation][google.longrunning.Operation]
+ // are eligible for filtering:
+ //
+ // - `name` - The name of the long-running operation
+ // - `done` - False if the operation is in progress, else true.
+ // - `metadata.@type` - the type of metadata. For example, the type string
+ // for
+ // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
+ // is
+ // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
+ // - `metadata.<field_name>` - any field in metadata.value.
+ // `metadata.@type` must be specified first, if filtering on metadata
+ // fields.
+ // - `error` - Error associated with the long-running operation.
+ // - `response.@type` - the type of response.
+ // - `response.<field_name>` - any field in response.value.
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic. However,
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // - `done:true` - The operation is complete.
+ // - `(metadata.@type=` \
+ // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
+ // AND` \
+ // `(metadata.instance_config.name:custom-config) AND` \
+ // `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
+ // `(error:*)` - Return operations where:
+ // - The operation's metadata type is
+ // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+ // - The instance configuration name contains "custom-config".
+ // - The operation started before 2021-03-28T14:50:00Z.
+ // - The operation resulted in an error.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Number of operations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
+ // from a previous
+ // [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
+ // to the same `parent` and with the same `filter`.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListInstanceConfigOperationsRequest) Reset() {
+ *x = ListInstanceConfigOperationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstanceConfigOperationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstanceConfigOperationsRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *ListInstanceConfigOperationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstanceConfigOperationsRequest.ProtoReflect.Descriptor instead.
+func (*ListInstanceConfigOperationsRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{10}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *ListInstanceConfigOperationsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListInstanceConfigOperationsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListInstanceConfigOperationsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListInstanceConfigOperationsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The response for
+// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
+type ListInstanceConfigOperationsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of matching instance configuration [long-running
+ // operations][google.longrunning.Operation]. Each operation's name will be
+ // prefixed by the name of the instance configuration. The operation's
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata.
+ Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
+ // call to fetch more of the matching metadata.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListInstanceConfigOperationsResponse) Reset() {
+ *x = ListInstanceConfigOperationsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstanceConfigOperationsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstanceConfigOperationsResponse) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *ListInstanceConfigOperationsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstanceConfigOperationsResponse.ProtoReflect.Descriptor instead.
+func (*ListInstanceConfigOperationsResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{11}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *ListInstanceConfigOperationsResponse) GetOperations() []*longrunningpb.Operation {
+ if x != nil {
+ return x.Operations
+ }
+ return nil
+}
+
+func (x *ListInstanceConfigOperationsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
+type GetInstanceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the requested instance. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // If field_mask is present, specifies the subset of
+ // [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
+ // returned. If absent, all
+ // [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
+ FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
+}
+
+func (x *GetInstanceRequest) Reset() {
+ *x = GetInstanceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetInstanceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetInstanceRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *GetInstanceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetInstanceRequest.ProtoReflect.Descriptor instead.
+func (*GetInstanceRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{12}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *GetInstanceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GetInstanceRequest) GetFieldMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.FieldMask
+ }
+ return nil
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+type CreateInstanceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the project in which to create the instance. Values
+ // are of the form `projects/<project>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The ID of the instance to create. Valid identifiers are of the
+ // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
+ // length.
+ InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+ // Required. The instance to create. The name may be omitted, but if
+ // specified must be `<parent>/instances/<instance_id>`.
+ Instance *Instance `protobuf:"bytes,3,opt,name=instance,proto3" json:"instance,omitempty"`
+}
+
+func (x *CreateInstanceRequest) Reset() {
+ *x = CreateInstanceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateInstanceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateInstanceRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *CreateInstanceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateInstanceRequest.ProtoReflect.Descriptor instead.
+func (*CreateInstanceRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{13}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *CreateInstanceRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateInstanceRequest) GetInstanceId() string {
+ if x != nil {
+ return x.InstanceId
+ }
+ return ""
+}
+
+func (x *CreateInstanceRequest) GetInstance() *Instance {
+ if x != nil {
+ return x.Instance
+ }
+ return nil
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+type ListInstancesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the project for which a list of instances is
+ // requested. Values are of the form `projects/<project>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Number of instances to be returned in the response. If 0 or less, defaults
+ // to the server's maximum allowed page size.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
+ // from a previous
+ // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // An expression for filtering the results of the request. Filter rules are
+ // case insensitive. The fields eligible for filtering are:
+ //
+ // - `name`
+ // - `display_name`
+ // - `labels.key` where key is the name of a label
+ //
+ // Some examples of using filters are:
+ //
+ // - `name:*` --> The instance has a name.
+ // - `name:Howl` --> The instance's name contains the string "howl".
+ // - `name:HOWL` --> Equivalent to above.
+ // - `NAME:howl` --> Equivalent to above.
+ // - `labels.env:*` --> The instance has the label "env".
+ // - `labels.env:dev` --> The instance has the label "env" and the value of
+ // the label contains the string "dev".
+ // - `name:howl labels.env:dev` --> The instance's name contains "howl" and
+ // it has the label "env" with its value
+ // containing "dev".
+ Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Deadline used while retrieving metadata for instances.
+ // Instances whose metadata cannot be retrieved within this deadline will be
+ // added to
+ // [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
+ // in
+ // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
+ InstanceDeadline *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=instance_deadline,json=instanceDeadline,proto3" json:"instance_deadline,omitempty"`
+}
+
+func (x *ListInstancesRequest) Reset() {
+ *x = ListInstancesRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstancesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstancesRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *ListInstancesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstancesRequest.ProtoReflect.Descriptor instead.
+func (*ListInstancesRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{14}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *ListInstancesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListInstancesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListInstancesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListInstancesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListInstancesRequest) GetInstanceDeadline() *timestamppb.Timestamp {
+ if x != nil {
+ return x.InstanceDeadline
+ }
+ return nil
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The response for
+// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
+type ListInstancesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of requested instances.
+ Instances []*Instance `protobuf:"bytes,1,rep,name=instances,proto3" json:"instances,omitempty"`
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
+ // call to fetch more of the matching instances.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The list of unreachable instances.
+ // It includes the names of instances whose metadata could not be retrieved
+ // within
+ // [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
+ Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
+}
+
+func (x *ListInstancesResponse) Reset() {
+ *x = ListInstancesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstancesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstancesResponse) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *ListInstancesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstancesResponse.ProtoReflect.Descriptor instead.
+func (*ListInstancesResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{15}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *ListInstancesResponse) GetInstances() []*Instance {
+ if x != nil {
+ return x.Instances
+ }
+ return nil
+}
+
+func (x *ListInstancesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListInstancesResponse) GetUnreachable() []string {
+ if x != nil {
+ return x.Unreachable
+ }
+ return nil
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+type UpdateInstanceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The instance to update, which must always include the instance
+ // name. Otherwise, only fields mentioned in
+ // [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
+ // need be included.
+ Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"`
+ // Required. A mask specifying which fields in
+ // [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
+ // The field mask must always be specified; this prevents any future fields in
+ // [Instance][google.spanner.admin.instance.v1.Instance] from being erased
+ // accidentally by clients that do not know about them.
+ FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
+}
+
+func (x *UpdateInstanceRequest) Reset() {
+ *x = UpdateInstanceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateInstanceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateInstanceRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *UpdateInstanceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateInstanceRequest.ProtoReflect.Descriptor instead.
+func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{16}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *UpdateInstanceRequest) GetInstance() *Instance {
+ if x != nil {
+ return x.Instance
+ }
+ return nil
+}
+
+func (x *UpdateInstanceRequest) GetFieldMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.FieldMask
+ }
+ return nil
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// The request for
+// [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
+type DeleteInstanceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the instance to be deleted. Values are of the form
+ // `projects/<project>/instances/<instance>`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteInstanceRequest) Reset() {
+ *x = DeleteInstanceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteInstanceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteInstanceRequest) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *DeleteInstanceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteInstanceRequest.ProtoReflect.Descriptor instead.
+func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{17}
+}
+
+// GetName returns Name; safe on a nil receiver (returns the zero value).
+func (x *DeleteInstanceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// NOTE(review): generated by protoc-gen-go (*.pb.go); code left byte-identical —
+// regenerate from the .proto instead of hand-editing.
+
+// Metadata type for the operation returned by
+// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
+type CreateInstanceMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The instance being created.
+ Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"`
+ // The time at which the
+ // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
+ // request was received.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+ // The time at which this operation failed or was completed successfully.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // The expected fulfillment period of this create operation.
+ ExpectedFulfillmentPeriod FulfillmentPeriod `protobuf:"varint,5,opt,name=expected_fulfillment_period,json=expectedFulfillmentPeriod,proto3,enum=google.spanner.admin.instance.v1.FulfillmentPeriod" json:"expected_fulfillment_period,omitempty"`
+}
+
+func (x *CreateInstanceMetadata) Reset() {
+ *x = CreateInstanceMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateInstanceMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateInstanceMetadata) ProtoMessage() {}
+
+// ProtoReflect returns the reflective message view; with the unsafe fast
+// path enabled it lazily caches the message info on first call.
+func (x *CreateInstanceMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateInstanceMetadata.ProtoReflect.Descriptor instead.
+func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{18}
+}
+
+// Getters below are nil-receiver safe and return the zero value when x is nil.
+func (x *CreateInstanceMetadata) GetInstance() *Instance {
+ if x != nil {
+ return x.Instance
+ }
+ return nil
+}
+
+func (x *CreateInstanceMetadata) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *CreateInstanceMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+func (x *CreateInstanceMetadata) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+func (x *CreateInstanceMetadata) GetExpectedFulfillmentPeriod() FulfillmentPeriod {
+ if x != nil {
+ return x.ExpectedFulfillmentPeriod
+ }
+ return FulfillmentPeriod_FULFILLMENT_PERIOD_UNSPECIFIED
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
+type UpdateInstanceMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The desired end state of the update.
+ Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"`
+ // The time at which
+ // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
+ // request was received.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+ // The time at which this operation failed or was completed successfully.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // The expected fulfillment period of this update operation.
+ ExpectedFulfillmentPeriod FulfillmentPeriod `protobuf:"varint,5,opt,name=expected_fulfillment_period,json=expectedFulfillmentPeriod,proto3,enum=google.spanner.admin.instance.v1.FulfillmentPeriod" json:"expected_fulfillment_period,omitempty"`
+}
+
+func (x *UpdateInstanceMetadata) Reset() {
+ *x = UpdateInstanceMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateInstanceMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateInstanceMetadata) ProtoMessage() {}
+
+func (x *UpdateInstanceMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateInstanceMetadata.ProtoReflect.Descriptor instead.
+func (*UpdateInstanceMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *UpdateInstanceMetadata) GetInstance() *Instance {
+ if x != nil {
+ return x.Instance
+ }
+ return nil
+}
+
+func (x *UpdateInstanceMetadata) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *UpdateInstanceMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+func (x *UpdateInstanceMetadata) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+func (x *UpdateInstanceMetadata) GetExpectedFulfillmentPeriod() FulfillmentPeriod {
+ if x != nil {
+ return x.ExpectedFulfillmentPeriod
+ }
+ return FulfillmentPeriod_FULFILLMENT_PERIOD_UNSPECIFIED
+}
+
+// Metadata type for the operation returned by
+// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
+type CreateInstanceConfigMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The target instance configuration end state.
+ InstanceConfig *InstanceConfig `protobuf:"bytes,1,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
+ // The progress of the
+ // [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
+ // operation.
+ Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
+ // The time at which this operation was cancelled.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+}
+
+func (x *CreateInstanceConfigMetadata) Reset() {
+ *x = CreateInstanceConfigMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateInstanceConfigMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateInstanceConfigMetadata) ProtoMessage() {}
+
+func (x *CreateInstanceConfigMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateInstanceConfigMetadata.ProtoReflect.Descriptor instead.
+func (*CreateInstanceConfigMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *CreateInstanceConfigMetadata) GetInstanceConfig() *InstanceConfig {
+ if x != nil {
+ return x.InstanceConfig
+ }
+ return nil
+}
+
+func (x *CreateInstanceConfigMetadata) GetProgress() *OperationProgress {
+ if x != nil {
+ return x.Progress
+ }
+ return nil
+}
+
+func (x *CreateInstanceConfigMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
+type UpdateInstanceConfigMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The desired instance configuration after updating.
+ InstanceConfig *InstanceConfig `protobuf:"bytes,1,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
+ // The progress of the
+ // [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
+ // operation.
+ Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
+ // The time at which this operation was cancelled.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+}
+
+func (x *UpdateInstanceConfigMetadata) Reset() {
+ *x = UpdateInstanceConfigMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateInstanceConfigMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateInstanceConfigMetadata) ProtoMessage() {}
+
+func (x *UpdateInstanceConfigMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateInstanceConfigMetadata.ProtoReflect.Descriptor instead.
+func (*UpdateInstanceConfigMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *UpdateInstanceConfigMetadata) GetInstanceConfig() *InstanceConfig {
+ if x != nil {
+ return x.InstanceConfig
+ }
+ return nil
+}
+
+func (x *UpdateInstanceConfigMetadata) GetProgress() *OperationProgress {
+ if x != nil {
+ return x.Progress
+ }
+ return nil
+}
+
+func (x *UpdateInstanceConfigMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+// An isolated set of Cloud Spanner resources that databases can define
+// placements on.
+type InstancePartition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A unique identifier for the instance partition. Values are of the
+ // form
+ // `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
+ // The final segment of the name must be between 2 and 64 characters in
+ // length. An instance partition's name cannot be changed after the instance
+ // partition is created.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The name of the instance partition's configuration. Values are of
+ // the form `projects/<project>/instanceConfigs/<configuration>`. See also
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
+ // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
+ Config string `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+ // Required. The descriptive name for this instance partition as it appears in
+ // UIs. Must be unique per project and between 4 and 30 characters in length.
+ DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Compute capacity defines amount of server and storage resources that are
+ // available to the databases in an instance partition. At most one of either
+ // node_count or processing_units should be present in the message. See [the
+ // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
+ // for more information about nodes and processing units.
+ //
+ // Types that are assignable to ComputeCapacity:
+ //
+ // *InstancePartition_NodeCount
+ // *InstancePartition_ProcessingUnits
+ ComputeCapacity isInstancePartition_ComputeCapacity `protobuf_oneof:"compute_capacity"`
+ // Output only. The current instance partition state.
+ State InstancePartition_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.spanner.admin.instance.v1.InstancePartition_State" json:"state,omitempty"`
+ // Output only. The time at which the instance partition was created.
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ // Output only. The time at which the instance partition was most recently
+ // updated.
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ // Output only. The names of the databases that reference this
+ // instance partition. Referencing databases should share the parent instance.
+ // The existence of any referencing database prevents the instance partition
+ // from being deleted.
+ ReferencingDatabases []string `protobuf:"bytes,10,rep,name=referencing_databases,json=referencingDatabases,proto3" json:"referencing_databases,omitempty"`
+ // Output only. The names of the backups that reference this instance
+ // partition. Referencing backups should share the parent instance. The
+ // existence of any referencing backup prevents the instance partition from
+ // being deleted.
+ ReferencingBackups []string `protobuf:"bytes,11,rep,name=referencing_backups,json=referencingBackups,proto3" json:"referencing_backups,omitempty"`
+ // Used for optimistic concurrency control as a way
+ // to help prevent simultaneous updates of a instance partition from
+ // overwriting each other. It is strongly suggested that systems make use of
+ // the etag in the read-modify-write cycle to perform instance partition
+ // updates in order to avoid race conditions: An etag is returned in the
+ // response which contains instance partitions, and systems are expected to
+ // put that etag in the request to update instance partitions to ensure that
+ // their change will be applied to the same version of the instance partition.
+ // If no etag is provided in the call to update instance partition, then the
+ // existing instance partition is overwritten blindly.
+ Etag string `protobuf:"bytes,12,opt,name=etag,proto3" json:"etag,omitempty"`
+}
+
+func (x *InstancePartition) Reset() {
+ *x = InstancePartition{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InstancePartition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InstancePartition) ProtoMessage() {}
+
+func (x *InstancePartition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InstancePartition.ProtoReflect.Descriptor instead.
+func (*InstancePartition) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *InstancePartition) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *InstancePartition) GetConfig() string {
+ if x != nil {
+ return x.Config
+ }
+ return ""
+}
+
+func (x *InstancePartition) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *InstancePartition) GetComputeCapacity() isInstancePartition_ComputeCapacity {
+ if m != nil {
+ return m.ComputeCapacity
+ }
+ return nil
+}
+
+func (x *InstancePartition) GetNodeCount() int32 {
+ if x, ok := x.GetComputeCapacity().(*InstancePartition_NodeCount); ok {
+ return x.NodeCount
+ }
+ return 0
+}
+
+func (x *InstancePartition) GetProcessingUnits() int32 {
+ if x, ok := x.GetComputeCapacity().(*InstancePartition_ProcessingUnits); ok {
+ return x.ProcessingUnits
+ }
+ return 0
+}
+
+func (x *InstancePartition) GetState() InstancePartition_State {
+ if x != nil {
+ return x.State
+ }
+ return InstancePartition_STATE_UNSPECIFIED
+}
+
+func (x *InstancePartition) GetCreateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreateTime
+ }
+ return nil
+}
+
+func (x *InstancePartition) GetUpdateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.UpdateTime
+ }
+ return nil
+}
+
+func (x *InstancePartition) GetReferencingDatabases() []string {
+ if x != nil {
+ return x.ReferencingDatabases
+ }
+ return nil
+}
+
+func (x *InstancePartition) GetReferencingBackups() []string {
+ if x != nil {
+ return x.ReferencingBackups
+ }
+ return nil
+}
+
+func (x *InstancePartition) GetEtag() string {
+ if x != nil {
+ return x.Etag
+ }
+ return ""
+}
+
+type isInstancePartition_ComputeCapacity interface {
+ isInstancePartition_ComputeCapacity()
+}
+
+type InstancePartition_NodeCount struct {
+ // The number of nodes allocated to this instance partition.
+ //
+ // Users can set the node_count field to specify the target number of nodes
+ // allocated to the instance partition.
+ //
+ // This may be zero in API responses for instance partitions that are not
+ // yet in state `READY`.
+ NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3,oneof"`
+}
+
+type InstancePartition_ProcessingUnits struct {
+ // The number of processing units allocated to this instance partition.
+ //
+ // Users can set the processing_units field to specify the target number of
+ // processing units allocated to the instance partition.
+ //
+ // This may be zero in API responses for instance partitions that are not
+ // yet in state `READY`.
+ ProcessingUnits int32 `protobuf:"varint,6,opt,name=processing_units,json=processingUnits,proto3,oneof"`
+}
+
+func (*InstancePartition_NodeCount) isInstancePartition_ComputeCapacity() {}
+
+func (*InstancePartition_ProcessingUnits) isInstancePartition_ComputeCapacity() {}
+
+// Metadata type for the operation returned by
+// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
+type CreateInstancePartitionMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The instance partition being created.
+ InstancePartition *InstancePartition `protobuf:"bytes,1,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
+ // The time at which the
+ // [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
+ // request was received.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+ // The time at which this operation failed or was completed successfully.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+}
+
+func (x *CreateInstancePartitionMetadata) Reset() {
+ *x = CreateInstancePartitionMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateInstancePartitionMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateInstancePartitionMetadata) ProtoMessage() {}
+
+func (x *CreateInstancePartitionMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateInstancePartitionMetadata.ProtoReflect.Descriptor instead.
+func (*CreateInstancePartitionMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *CreateInstancePartitionMetadata) GetInstancePartition() *InstancePartition {
+ if x != nil {
+ return x.InstancePartition
+ }
+ return nil
+}
+
+func (x *CreateInstancePartitionMetadata) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *CreateInstancePartitionMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+func (x *CreateInstancePartitionMetadata) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+// The request for
+// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
+type CreateInstancePartitionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the instance in which to create the instance
+ // partition. Values are of the form
+ // `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The ID of the instance partition to create. Valid identifiers are
+ // of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
+ // characters in length.
+ InstancePartitionId string `protobuf:"bytes,2,opt,name=instance_partition_id,json=instancePartitionId,proto3" json:"instance_partition_id,omitempty"`
+ // Required. The instance partition to create. The instance_partition.name may
+ // be omitted, but if specified must be
+ // `<parent>/instancePartitions/<instance_partition_id>`.
+ InstancePartition *InstancePartition `protobuf:"bytes,3,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
+}
+
+func (x *CreateInstancePartitionRequest) Reset() {
+ *x = CreateInstancePartitionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CreateInstancePartitionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateInstancePartitionRequest) ProtoMessage() {}
+
+func (x *CreateInstancePartitionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateInstancePartitionRequest.ProtoReflect.Descriptor instead.
+func (*CreateInstancePartitionRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *CreateInstancePartitionRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateInstancePartitionRequest) GetInstancePartitionId() string {
+ if x != nil {
+ return x.InstancePartitionId
+ }
+ return ""
+}
+
+func (x *CreateInstancePartitionRequest) GetInstancePartition() *InstancePartition {
+ if x != nil {
+ return x.InstancePartition
+ }
+ return nil
+}
+
+// The request for
+// [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition].
+type DeleteInstancePartitionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the instance partition to be deleted.
+ // Values are of the form
+ // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If not empty, the API only deletes the instance partition when
+ // the etag provided matches the current status of the requested instance
+ // partition. Otherwise, deletes the instance partition without checking the
+ // current status of the requested instance partition.
+ Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"`
+}
+
+func (x *DeleteInstancePartitionRequest) Reset() {
+ *x = DeleteInstancePartitionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DeleteInstancePartitionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteInstancePartitionRequest) ProtoMessage() {}
+
+func (x *DeleteInstancePartitionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[25]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteInstancePartitionRequest.ProtoReflect.Descriptor instead.
+func (*DeleteInstancePartitionRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *DeleteInstancePartitionRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteInstancePartitionRequest) GetEtag() string {
+ if x != nil {
+ return x.Etag
+ }
+ return ""
+}
+
+// The request for
+// [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition].
+type GetInstancePartitionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the requested instance partition. Values are of
+ // the form
+ // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetInstancePartitionRequest) Reset() {
+ *x = GetInstancePartitionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetInstancePartitionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetInstancePartitionRequest) ProtoMessage() {}
+
+func (x *GetInstancePartitionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[26]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetInstancePartitionRequest.ProtoReflect.Descriptor instead.
+func (*GetInstancePartitionRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *GetInstancePartitionRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The request for
+// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
+type UpdateInstancePartitionRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The instance partition to update, which must always include the
+ // instance partition name. Otherwise, only fields mentioned in
+ // [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
+ // need be included.
+ InstancePartition *InstancePartition `protobuf:"bytes,1,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
+ // Required. A mask specifying which fields in
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
+ // should be updated. The field mask must always be specified; this prevents
+ // any future fields in
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
+ // from being erased accidentally by clients that do not know about them.
+ FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
+}
+
+func (x *UpdateInstancePartitionRequest) Reset() {
+ *x = UpdateInstancePartitionRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateInstancePartitionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateInstancePartitionRequest) ProtoMessage() {}
+
+func (x *UpdateInstancePartitionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[27]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateInstancePartitionRequest.ProtoReflect.Descriptor instead.
+func (*UpdateInstancePartitionRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *UpdateInstancePartitionRequest) GetInstancePartition() *InstancePartition {
+ if x != nil {
+ return x.InstancePartition
+ }
+ return nil
+}
+
+func (x *UpdateInstancePartitionRequest) GetFieldMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.FieldMask
+ }
+ return nil
+}
+
+// Metadata type for the operation returned by
+// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
+type UpdateInstancePartitionMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The desired end state of the update.
+ InstancePartition *InstancePartition `protobuf:"bytes,1,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
+ // The time at which
+ // [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
+ // request was received.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The time at which this operation was cancelled. If set, this operation is
+ // in the process of undoing itself (which is guaranteed to succeed) and
+ // cannot be cancelled again.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+ // The time at which this operation failed or was completed successfully.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+}
+
+func (x *UpdateInstancePartitionMetadata) Reset() {
+ *x = UpdateInstancePartitionMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateInstancePartitionMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateInstancePartitionMetadata) ProtoMessage() {}
+
+func (x *UpdateInstancePartitionMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateInstancePartitionMetadata.ProtoReflect.Descriptor instead.
+func (*UpdateInstancePartitionMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *UpdateInstancePartitionMetadata) GetInstancePartition() *InstancePartition {
+ if x != nil {
+ return x.InstancePartition
+ }
+ return nil
+}
+
+func (x *UpdateInstancePartitionMetadata) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+func (x *UpdateInstancePartitionMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+func (x *UpdateInstancePartitionMetadata) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+// The request for
+// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
+type ListInstancePartitionsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The instance whose instance partitions should be listed. Values
+ // are of the form `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Number of instance partitions to be returned in the response. If 0 or less,
+ // defaults to the server's maximum allowed page size.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
+ // from a previous
+ // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Optional. Deadline used while retrieving metadata for instance partitions.
+ // Instance partitions whose metadata cannot be retrieved within this deadline
+ // will be added to
+ // [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
+ // in
+ // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
+ InstancePartitionDeadline *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=instance_partition_deadline,json=instancePartitionDeadline,proto3" json:"instance_partition_deadline,omitempty"`
+}
+
+func (x *ListInstancePartitionsRequest) Reset() {
+ *x = ListInstancePartitionsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstancePartitionsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstancePartitionsRequest) ProtoMessage() {}
+
+func (x *ListInstancePartitionsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[29]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstancePartitionsRequest.ProtoReflect.Descriptor instead.
+func (*ListInstancePartitionsRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *ListInstancePartitionsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListInstancePartitionsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionsRequest) GetInstancePartitionDeadline() *timestamppb.Timestamp {
+ if x != nil {
+ return x.InstancePartitionDeadline
+ }
+ return nil
+}
+
+// The response for
+// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
+type ListInstancePartitionsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of requested instancePartitions.
+ InstancePartitions []*InstancePartition `protobuf:"bytes,1,rep,name=instance_partitions,json=instancePartitions,proto3" json:"instance_partitions,omitempty"`
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
+ // call to fetch more of the matching instance partitions.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The list of unreachable instance partitions.
+ // It includes the names of instance partitions whose metadata could
+ // not be retrieved within
+ // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
+ Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
+}
+
+func (x *ListInstancePartitionsResponse) Reset() {
+ *x = ListInstancePartitionsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstancePartitionsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstancePartitionsResponse) ProtoMessage() {}
+
+func (x *ListInstancePartitionsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[30]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstancePartitionsResponse.ProtoReflect.Descriptor instead.
+func (*ListInstancePartitionsResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{30}
+}
+
+func (x *ListInstancePartitionsResponse) GetInstancePartitions() []*InstancePartition {
+ if x != nil {
+ return x.InstancePartitions
+ }
+ return nil
+}
+
+func (x *ListInstancePartitionsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionsResponse) GetUnreachable() []string {
+ if x != nil {
+ return x.Unreachable
+ }
+ return nil
+}
+
+// The request for
+// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
+type ListInstancePartitionOperationsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The parent instance of the instance partition operations.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. An expression that filters the list of returned operations.
+ //
+ // A filter expression consists of a field name, a
+ // comparison operator, and a value for filtering.
+ // The value must be a string, a number, or a boolean. The comparison operator
+ // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
+ // Colon `:` is the contains operator. Filter rules are not case sensitive.
+ //
+ // The following fields in the [Operation][google.longrunning.Operation]
+ // are eligible for filtering:
+ //
+ // - `name` - The name of the long-running operation
+ // - `done` - False if the operation is in progress, else true.
+ // - `metadata.@type` - the type of metadata. For example, the type string
+ // for
+ // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
+ // is
+ // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
+ // - `metadata.<field_name>` - any field in metadata.value.
+ // `metadata.@type` must be specified first, if filtering on metadata
+ // fields.
+ // - `error` - Error associated with the long-running operation.
+ // - `response.@type` - the type of response.
+ // - `response.<field_name>` - any field in response.value.
+ //
+ // You can combine multiple expressions by enclosing each expression in
+ // parentheses. By default, expressions are combined with AND logic. However,
+ // you can specify AND, OR, and NOT logic explicitly.
+ //
+ // Here are a few examples:
+ //
+ // - `done:true` - The operation is complete.
+ // - `(metadata.@type=` \
+ // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
+ // AND` \
+ // `(metadata.instance_partition.name:custom-instance-partition) AND` \
+ // `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
+ // `(error:*)` - Return operations where:
+ // - The operation's metadata type is
+ // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
+ // - The instance partition name contains "custom-instance-partition".
+ // - The operation started before 2021-03-28T14:50:00Z.
+ // - The operation resulted in an error.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. Number of operations to be returned in the response. If 0 or
+ // less, defaults to the server's maximum allowed page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If non-empty, `page_token` should contain a
+ // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
+ // from a previous
+ // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
+ // to the same `parent` and with the same `filter`.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Optional. Deadline used while retrieving metadata for instance partition
+ // operations. Instance partitions whose operation metadata cannot be
+ // retrieved within this deadline will be added to
+ // [unreachable][ListInstancePartitionOperationsResponse.unreachable] in
+ // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
+ InstancePartitionDeadline *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=instance_partition_deadline,json=instancePartitionDeadline,proto3" json:"instance_partition_deadline,omitempty"`
+}
+
+func (x *ListInstancePartitionOperationsRequest) Reset() {
+ *x = ListInstancePartitionOperationsRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstancePartitionOperationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstancePartitionOperationsRequest) ProtoMessage() {}
+
+func (x *ListInstancePartitionOperationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[31]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstancePartitionOperationsRequest.ProtoReflect.Descriptor instead.
+func (*ListInstancePartitionOperationsRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{31}
+}
+
+func (x *ListInstancePartitionOperationsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionOperationsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionOperationsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListInstancePartitionOperationsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionOperationsRequest) GetInstancePartitionDeadline() *timestamppb.Timestamp {
+ if x != nil {
+ return x.InstancePartitionDeadline
+ }
+ return nil
+}
+
+// The response for
+// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
+type ListInstancePartitionOperationsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of matching instance partition [long-running
+ // operations][google.longrunning.Operation]. Each operation's name will be
+ // prefixed by the instance partition's name. The operation's
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata.
+ Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
+ // `next_page_token` can be sent in a subsequent
+ // [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
+ // call to fetch more of the matching metadata.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The list of unreachable instance partitions.
+ // It includes the names of instance partitions whose operation metadata could
+ // not be retrieved within
+ // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
+ UnreachableInstancePartitions []string `protobuf:"bytes,3,rep,name=unreachable_instance_partitions,json=unreachableInstancePartitions,proto3" json:"unreachable_instance_partitions,omitempty"`
+}
+
+func (x *ListInstancePartitionOperationsResponse) Reset() {
+ *x = ListInstancePartitionOperationsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListInstancePartitionOperationsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListInstancePartitionOperationsResponse) ProtoMessage() {}
+
+func (x *ListInstancePartitionOperationsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[32]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListInstancePartitionOperationsResponse.ProtoReflect.Descriptor instead.
+func (*ListInstancePartitionOperationsResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *ListInstancePartitionOperationsResponse) GetOperations() []*longrunningpb.Operation {
+ if x != nil {
+ return x.Operations
+ }
+ return nil
+}
+
+func (x *ListInstancePartitionOperationsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListInstancePartitionOperationsResponse) GetUnreachableInstancePartitions() []string {
+ if x != nil {
+ return x.UnreachableInstancePartitions
+ }
+ return nil
+}
+
+// The request for
+// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
+type MoveInstanceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The instance to move.
+ // Values are of the form `projects/<project>/instances/<instance>`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The target instance configuration where to move the instance.
+ // Values are of the form `projects/<project>/instanceConfigs/<config>`.
+ TargetConfig string `protobuf:"bytes,2,opt,name=target_config,json=targetConfig,proto3" json:"target_config,omitempty"`
+}
+
+func (x *MoveInstanceRequest) Reset() {
+ *x = MoveInstanceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveInstanceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveInstanceRequest) ProtoMessage() {}
+
+func (x *MoveInstanceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[33]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveInstanceRequest.ProtoReflect.Descriptor instead.
+func (*MoveInstanceRequest) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{33}
+}
+
+func (x *MoveInstanceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MoveInstanceRequest) GetTargetConfig() string {
+ if x != nil {
+ return x.TargetConfig
+ }
+ return ""
+}
+
+// The response for
+// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
+type MoveInstanceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *MoveInstanceResponse) Reset() {
+ *x = MoveInstanceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveInstanceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveInstanceResponse) ProtoMessage() {}
+
+func (x *MoveInstanceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[34]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveInstanceResponse.ProtoReflect.Descriptor instead.
+func (*MoveInstanceResponse) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{34}
+}
+
+// Metadata type for the operation returned by
+// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
+type MoveInstanceMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The target instance configuration where to move the instance.
+ // Values are of the form `projects/<project>/instanceConfigs/<config>`.
+ TargetConfig string `protobuf:"bytes,1,opt,name=target_config,json=targetConfig,proto3" json:"target_config,omitempty"`
+ // The progress of the
+ // [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
+ // operation.
+ // [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
+ // is reset when cancellation is requested.
+ Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
+ // The time at which this operation was cancelled.
+ CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
+}
+
+func (x *MoveInstanceMetadata) Reset() {
+ *x = MoveInstanceMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MoveInstanceMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MoveInstanceMetadata) ProtoMessage() {}
+
+func (x *MoveInstanceMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[35]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MoveInstanceMetadata.ProtoReflect.Descriptor instead.
+func (*MoveInstanceMetadata) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{35}
+}
+
+func (x *MoveInstanceMetadata) GetTargetConfig() string {
+ if x != nil {
+ return x.TargetConfig
+ }
+ return ""
+}
+
+func (x *MoveInstanceMetadata) GetProgress() *OperationProgress {
+ if x != nil {
+ return x.Progress
+ }
+ return nil
+}
+
+func (x *MoveInstanceMetadata) GetCancelTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CancelTime
+ }
+ return nil
+}
+
+// The autoscaling limits for the instance. Users can define the minimum and
+// maximum compute capacity allocated to the instance, and the autoscaler will
+// only scale within that range. Users can either use nodes or processing
+// units to specify the limits, but should use the same unit to set both the
+// min_limit and max_limit.
+type AutoscalingConfig_AutoscalingLimits struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The minimum compute capacity for the instance.
+ //
+ // Types that are assignable to MinLimit:
+ //
+ // *AutoscalingConfig_AutoscalingLimits_MinNodes
+ // *AutoscalingConfig_AutoscalingLimits_MinProcessingUnits
+ MinLimit isAutoscalingConfig_AutoscalingLimits_MinLimit `protobuf_oneof:"min_limit"`
+ // The maximum compute capacity for the instance. The maximum compute
+ // capacity should be less than or equal to 10X the minimum compute
+ // capacity.
+ //
+ // Types that are assignable to MaxLimit:
+ //
+ // *AutoscalingConfig_AutoscalingLimits_MaxNodes
+ // *AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits
+ MaxLimit isAutoscalingConfig_AutoscalingLimits_MaxLimit `protobuf_oneof:"max_limit"`
+}
+
+func (x *AutoscalingConfig_AutoscalingLimits) Reset() {
+ *x = AutoscalingConfig_AutoscalingLimits{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AutoscalingConfig_AutoscalingLimits) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AutoscalingConfig_AutoscalingLimits) ProtoMessage() {}
+
+func (x *AutoscalingConfig_AutoscalingLimits) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AutoscalingConfig_AutoscalingLimits.ProtoReflect.Descriptor instead.
+func (*AutoscalingConfig_AutoscalingLimits) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (m *AutoscalingConfig_AutoscalingLimits) GetMinLimit() isAutoscalingConfig_AutoscalingLimits_MinLimit {
+ if m != nil {
+ return m.MinLimit
+ }
+ return nil
+}
+
+func (x *AutoscalingConfig_AutoscalingLimits) GetMinNodes() int32 {
+ if x, ok := x.GetMinLimit().(*AutoscalingConfig_AutoscalingLimits_MinNodes); ok {
+ return x.MinNodes
+ }
+ return 0
+}
+
+func (x *AutoscalingConfig_AutoscalingLimits) GetMinProcessingUnits() int32 {
+ if x, ok := x.GetMinLimit().(*AutoscalingConfig_AutoscalingLimits_MinProcessingUnits); ok {
+ return x.MinProcessingUnits
+ }
+ return 0
+}
+
+func (m *AutoscalingConfig_AutoscalingLimits) GetMaxLimit() isAutoscalingConfig_AutoscalingLimits_MaxLimit {
+ if m != nil {
+ return m.MaxLimit
+ }
+ return nil
+}
+
+func (x *AutoscalingConfig_AutoscalingLimits) GetMaxNodes() int32 {
+ if x, ok := x.GetMaxLimit().(*AutoscalingConfig_AutoscalingLimits_MaxNodes); ok {
+ return x.MaxNodes
+ }
+ return 0
+}
+
+func (x *AutoscalingConfig_AutoscalingLimits) GetMaxProcessingUnits() int32 {
+ if x, ok := x.GetMaxLimit().(*AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits); ok {
+ return x.MaxProcessingUnits
+ }
+ return 0
+}
+
+type isAutoscalingConfig_AutoscalingLimits_MinLimit interface {
+ isAutoscalingConfig_AutoscalingLimits_MinLimit()
+}
+
+type AutoscalingConfig_AutoscalingLimits_MinNodes struct {
+ // Minimum number of nodes allocated to the instance. If set, this number
+ // should be greater than or equal to 1.
+ MinNodes int32 `protobuf:"varint,1,opt,name=min_nodes,json=minNodes,proto3,oneof"`
+}
+
+type AutoscalingConfig_AutoscalingLimits_MinProcessingUnits struct {
+ // Minimum number of processing units allocated to the instance. If set,
+ // this number should be multiples of 1000.
+ MinProcessingUnits int32 `protobuf:"varint,2,opt,name=min_processing_units,json=minProcessingUnits,proto3,oneof"`
+}
+
+func (*AutoscalingConfig_AutoscalingLimits_MinNodes) isAutoscalingConfig_AutoscalingLimits_MinLimit() {
+}
+
+func (*AutoscalingConfig_AutoscalingLimits_MinProcessingUnits) isAutoscalingConfig_AutoscalingLimits_MinLimit() {
+}
+
+type isAutoscalingConfig_AutoscalingLimits_MaxLimit interface {
+ isAutoscalingConfig_AutoscalingLimits_MaxLimit()
+}
+
+type AutoscalingConfig_AutoscalingLimits_MaxNodes struct {
+ // Maximum number of nodes allocated to the instance. If set, this number
+ // should be greater than or equal to min_nodes.
+ MaxNodes int32 `protobuf:"varint,3,opt,name=max_nodes,json=maxNodes,proto3,oneof"`
+}
+
+type AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits struct {
+ // Maximum number of processing units allocated to the instance. If set,
+ // this number should be multiples of 1000 and be greater than or equal to
+ // min_processing_units.
+ MaxProcessingUnits int32 `protobuf:"varint,4,opt,name=max_processing_units,json=maxProcessingUnits,proto3,oneof"`
+}
+
+func (*AutoscalingConfig_AutoscalingLimits_MaxNodes) isAutoscalingConfig_AutoscalingLimits_MaxLimit() {
+}
+
+func (*AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits) isAutoscalingConfig_AutoscalingLimits_MaxLimit() {
+}
+
+// The autoscaling targets for an instance.
+type AutoscalingConfig_AutoscalingTargets struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The target high priority cpu utilization percentage that the
+ // autoscaler should be trying to achieve for the instance. This number is
+ // on a scale from 0 (no utilization) to 100 (full utilization). The valid
+ // range is [10, 90] inclusive.
+ HighPriorityCpuUtilizationPercent int32 `protobuf:"varint,1,opt,name=high_priority_cpu_utilization_percent,json=highPriorityCpuUtilizationPercent,proto3" json:"high_priority_cpu_utilization_percent,omitempty"`
+ // Required. The target storage utilization percentage that the autoscaler
+ // should be trying to achieve for the instance. This number is on a scale
+ // from 0 (no utilization) to 100 (full utilization). The valid range is
+ // [10, 100] inclusive.
+ StorageUtilizationPercent int32 `protobuf:"varint,2,opt,name=storage_utilization_percent,json=storageUtilizationPercent,proto3" json:"storage_utilization_percent,omitempty"`
+}
+
+func (x *AutoscalingConfig_AutoscalingTargets) Reset() {
+ *x = AutoscalingConfig_AutoscalingTargets{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AutoscalingConfig_AutoscalingTargets) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AutoscalingConfig_AutoscalingTargets) ProtoMessage() {}
+
+func (x *AutoscalingConfig_AutoscalingTargets) ProtoReflect() protoreflect.Message {
+ mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[38]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AutoscalingConfig_AutoscalingTargets.ProtoReflect.Descriptor instead.
+func (*AutoscalingConfig_AutoscalingTargets) Descriptor() ([]byte, []int) {
+ return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *AutoscalingConfig_AutoscalingTargets) GetHighPriorityCpuUtilizationPercent() int32 {
+ if x != nil {
+ return x.HighPriorityCpuUtilizationPercent
+ }
+ return 0
+}
+
+func (x *AutoscalingConfig_AutoscalingTargets) GetStorageUtilizationPercent() int32 {
+ if x != nil {
+ return x.StorageUtilizationPercent
+ }
+ return 0
+}
+
+var File_google_spanner_admin_instance_v1_spanner_instance_admin_proto protoreflect.FileDescriptor
+
+var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc = []byte{
+ 0x0a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
+ 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
+ 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x02, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x4d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x65,
+ 0x70, 0x6c, 0x69, 0x63, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x36, 0x0a, 0x17, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a,
+ 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
+ 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57,
+ 0x49, 0x54, 0x4e, 0x45, 0x53, 0x53, 0x10, 0x03, 0x22, 0xc2, 0x07, 0x0a, 0x0e, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x49, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, 0x6e, 0x66, 0x6f,
+ 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x5f, 0x0a, 0x11, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
+ 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x4b, 0x0a, 0x0b, 0x62,
+ 0x61, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x62, 0x61,
+ 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74,
+ 0x61, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0b, 0x72, 0x65, 0x63,
+ 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x69, 0x6e, 0x67,
+ 0x12, 0x51, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74,
+ 0x61, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x42,
+ 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e,
+ 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x10, 0x01,
+ 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44,
+ 0x10, 0x02, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53,
+ 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01,
+ 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x60, 0xea, 0x41, 0x5d,
+ 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x95, 0x05,
+ 0x0a, 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69,
+ 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
+ 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x61, 0x75, 0x74,
+ 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x7c,
+ 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41,
+ 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63,
+ 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0xd3, 0x01, 0x0a,
+ 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69,
+ 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x4e, 0x6f, 0x64, 0x65,
+ 0x73, 0x12, 0x32, 0x0a, 0x14, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
+ 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48,
+ 0x00, 0x52, 0x12, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67,
+ 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64,
+ 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x4e,
+ 0x6f, 0x64, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x63,
+ 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x05, 0x48, 0x01, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
+ 0x69, 0x6e, 0x67, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f,
+ 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x69, 0x6d,
+ 0x69, 0x74, 0x1a, 0xb0, 0x01, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69,
+ 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x55, 0x0a, 0x25, 0x68, 0x69, 0x67,
+ 0x68, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75,
+ 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x21, 0x68,
+ 0x69, 0x67, 0x68, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x43, 0x70, 0x75, 0x55, 0x74,
+ 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
+ 0x12, 0x43, 0x0a, 0x1b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x74, 0x69, 0x6c,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x19, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65,
+ 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0xf8, 0x07, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64,
+ 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x6f,
+ 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09,
+ 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f,
+ 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x09, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x55,
+ 0x6e, 0x69, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
+ 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f,
+ 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a,
+ 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x6c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e,
+ 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12,
+ 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54,
+ 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12,
+ 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x07, 0x45, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c,
+ 0x0a, 0x08, 0x53, 0x54, 0x41, 0x4e, 0x44, 0x41, 0x52, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a,
+ 0x45, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x52, 0x49, 0x53, 0x45, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
+ 0x45, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x52, 0x49, 0x53, 0x45, 0x5f, 0x50, 0x4c, 0x55, 0x53, 0x10,
+ 0x03, 0x3a, 0x4d, 0xea, 0x41, 0x4a, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d,
+ 0x22, 0xa5, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73,
+ 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5d, 0x0a,
+ 0x18, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a,
+ 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa2, 0x02, 0x0a,
+ 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x12, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x5e, 0x0a, 0x0f,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c,
+ 0x79, 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x5e, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
+ 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
+ 0x61, 0x73, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65,
+ 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12,
+ 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xc6, 0x01, 0x0a, 0x23, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8d, 0x01,
+ 0x0a, 0x24, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01,
+ 0x0a, 0x12, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
+ 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xd7, 0x01, 0x0a,
+ 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
+ 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x08, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x47, 0x0a, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x61,
+ 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xab, 0x01, 0x0a, 0x15, 0x4c, 0x69,
+ 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a,
+ 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65,
+ 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68,
+ 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x6e, 0x72, 0x65,
+ 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x3e,
+ 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x54,
+ 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x84, 0x03, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
+ 0x46, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12,
+ 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65,
+ 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x73, 0x0a, 0x1b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+ 0x65, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46,
+ 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64,
+ 0x52, 0x19, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c,
+ 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0x84, 0x03, 0x0a, 0x16,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x46, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x39,
+ 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63,
+ 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69,
+ 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x73, 0x0a,
+ 0x1b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x66, 0x69, 0x6c,
+ 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e,
+ 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x52, 0x19, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65,
+ 0x64, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69,
+ 0x6f, 0x64, 0x22, 0x87, 0x02, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f,
+ 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
+ 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12,
+ 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x87, 0x02, 0x0a,
+ 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a,
+ 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52,
+ 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63,
+ 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x94, 0x06, 0x0a, 0x11, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0c,
+ 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
+ 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x48,
+ 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x55, 0x6e, 0x69,
+ 0x74, 0x73, 0x12, 0x54, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
+ 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x15,
+ 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61,
+ 0x62, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x14, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74,
+ 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
+ 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x0b, 0x20,
+ 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
+ 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x12, 0x0a, 0x04,
+ 0x65, 0x74, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
+ 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41,
+ 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
+ 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09,
+ 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x7e, 0xea, 0x41, 0x7b, 0x0a, 0x28,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
+ 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70,
+ 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x42, 0x12, 0x0a, 0x10, 0x63, 0x6f, 0x6d,
+ 0x70, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0xb4, 0x02,
+ 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x12, 0x62, 0x0a, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
+ 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a,
+ 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x22, 0x83, 0x02, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a,
+ 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x64, 0x12, 0x67, 0x0a, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7a, 0x0a, 0x1e, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa,
+ 0x41, 0x2a, 0x0a, 0x28, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x63, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1e,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x67,
+ 0x0a, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xb4, 0x02, 0x0a, 0x1f, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x62, 0x0a, 0x12, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
+ 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61,
+ 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xfd,
+ 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
+ 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x5f, 0x0a,
+ 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x19, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xd0,
+ 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
+ 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x64, 0x0a, 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c,
+ 0x65, 0x22, 0xad, 0x02, 0x0a, 0x26, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x12, 0x5f, 0x0a, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e,
+ 0x65, 0x22, 0xd8, 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a,
+ 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72,
+ 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f,
+ 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x46, 0x0a, 0x1f, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61,
+ 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1d, 0x75,
+ 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa6, 0x01, 0x0a,
+ 0x13, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27,
+ 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, 0x14, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc9, 0x01,
+ 0x0a, 0x14, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, 0x0a, 0x08, 0x70,
+ 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65,
+ 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b,
+ 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63,
+ 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x32, 0xda, 0x27, 0x0a, 0x0d, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xcc, 0x01, 0x0a, 0x13,
+ 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x12, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x38, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x29, 0x12, 0x27, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xb9, 0x01, 0x0a, 0x11, 0x47,
+ 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
+ 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x36,
+ 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x12, 0x27, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc8, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e,
+ 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd1, 0x01,
+ 0xca, 0x41, 0x70, 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
+ 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0xda, 0x41, 0x29, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x69, 0x64, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x12, 0xca, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd3, 0x01, 0xca, 0x41, 0x70, 0x0a, 0x2f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41,
+ 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x3c, 0x3a, 0x01, 0x2a, 0x32, 0x37, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa5,
+ 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x36,
+ 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x2a, 0x27, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xf0, 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x0d, 0x4c, 0x69,
+ 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x12, 0xe4, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47,
+ 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12,
+ 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa1, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x9c, 0x02, 0x0a, 0x0e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb1, 0x01, 0xca, 0x41, 0x64, 0x0a, 0x29, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x22, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x9d, 0x02, 0x0a, 0x0e, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb2, 0x01, 0xca, 0x41, 0x64, 0x0a, 0x29, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda,
+ 0x41, 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x32, 0x2a,
+ 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x0e, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x30,
+ 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x2a, 0x21, 0x2f,
+ 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69,
+ 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4f, 0xda, 0x41,
+ 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x93, 0x01,
+ 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47,
+ 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
+ 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, 0xda, 0x41, 0x08, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22,
+ 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
+ 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49,
+ 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61,
+ 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
+ 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x5a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72,
+ 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x3a, 0x01,
+ 0x2a, 0x22, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x14,
+ 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
+ 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x45, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
+ 0xe9, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xec, 0x01, 0xca,
+ 0x41, 0x76, 0x0a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x2f, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b,
+ 0x3a, 0x01, 0x2a, 0x22, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x17,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61,
+ 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x22, 0x45, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38,
+ 0x2a, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xea, 0x02, 0x0a, 0x17, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
+ 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
+ 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xed, 0x01, 0xca, 0x41, 0x76, 0x0a, 0x32, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0xda, 0x41, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x3a, 0x01, 0x2a, 0x32, 0x49, 0x2f, 0x76, 0x31, 0x2f,
+ 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x88, 0x02, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f,
+ 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x50,
+ 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12,
+ 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x89, 0x02, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
+ 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa2, 0x01, 0xca, 0x41, 0x6e, 0x0a, 0x35, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x2b, 0x3a, 0x01, 0x2a, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x76, 0x65, 0x1a, 0x78, 0xca, 0x41,
+ 0x16, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x5c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
+ 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d,
+ 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x8b, 0x02, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42,
+ 0x19, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
+ 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x70, 0x62, 0x3b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
+ 0x63, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x26,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61,
+ 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x49, 0x6e, 0x73, 0x74, 0x61,
+ 0x6e, 0x63, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
+ 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a,
+ 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	// rawDescOnce guards the one-time gzip compression performed by
+	// file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP below.
+	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescOnce sync.Once
+	// rawDescData starts out as the uncompressed file-descriptor bytes
+	// (rawDesc) and is replaced in place by its gzip-compressed form the
+	// first time rawDescGZIP is called.
+	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData = file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc
+)
+
+// file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP
+// returns the gzip-compressed form of the file's raw descriptor bytes,
+// compressing them exactly once (guarded by rawDescOnce) and caching the
+// result back into rawDescData for all subsequent calls.
+func file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP() []byte {
+	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescOnce.Do(func() {
+		// Replace the cached bytes with their compressed form; later
+		// calls skip this closure entirely.
+		compressed := protoimpl.X.CompressGZIP(file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData)
+		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData = compressed
+	})
+	return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData
+}
+
// enumTypes and msgTypes hold runtime type metadata for the 6 enums and 40
// messages declared in this file; they are populated by the TypeBuilder in
// file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_init.
var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 40)
// goTypes lists every Go type referenced by this file's descriptor, in
// index order: enums 0-5, messages 6-45 (nil entries are synthetic map-entry
// types), followed by types imported from other proto files. The depIdxs
// table below indexes into this slice.
var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_goTypes = []any{
	(ReplicaInfo_ReplicaType)(0),                  // 0: google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType
	(InstanceConfig_Type)(0),                      // 1: google.spanner.admin.instance.v1.InstanceConfig.Type
	(InstanceConfig_State)(0),                     // 2: google.spanner.admin.instance.v1.InstanceConfig.State
	(Instance_State)(0),                           // 3: google.spanner.admin.instance.v1.Instance.State
	(Instance_Edition)(0),                         // 4: google.spanner.admin.instance.v1.Instance.Edition
	(InstancePartition_State)(0),                  // 5: google.spanner.admin.instance.v1.InstancePartition.State
	(*ReplicaInfo)(nil),                           // 6: google.spanner.admin.instance.v1.ReplicaInfo
	(*InstanceConfig)(nil),                        // 7: google.spanner.admin.instance.v1.InstanceConfig
	(*AutoscalingConfig)(nil),                     // 8: google.spanner.admin.instance.v1.AutoscalingConfig
	(*Instance)(nil),                              // 9: google.spanner.admin.instance.v1.Instance
	(*ListInstanceConfigsRequest)(nil),            // 10: google.spanner.admin.instance.v1.ListInstanceConfigsRequest
	(*ListInstanceConfigsResponse)(nil),           // 11: google.spanner.admin.instance.v1.ListInstanceConfigsResponse
	(*GetInstanceConfigRequest)(nil),              // 12: google.spanner.admin.instance.v1.GetInstanceConfigRequest
	(*CreateInstanceConfigRequest)(nil),           // 13: google.spanner.admin.instance.v1.CreateInstanceConfigRequest
	(*UpdateInstanceConfigRequest)(nil),           // 14: google.spanner.admin.instance.v1.UpdateInstanceConfigRequest
	(*DeleteInstanceConfigRequest)(nil),           // 15: google.spanner.admin.instance.v1.DeleteInstanceConfigRequest
	(*ListInstanceConfigOperationsRequest)(nil),   // 16: google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest
	(*ListInstanceConfigOperationsResponse)(nil),  // 17: google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse
	(*GetInstanceRequest)(nil),                    // 18: google.spanner.admin.instance.v1.GetInstanceRequest
	(*CreateInstanceRequest)(nil),                 // 19: google.spanner.admin.instance.v1.CreateInstanceRequest
	(*ListInstancesRequest)(nil),                  // 20: google.spanner.admin.instance.v1.ListInstancesRequest
	(*ListInstancesResponse)(nil),                 // 21: google.spanner.admin.instance.v1.ListInstancesResponse
	(*UpdateInstanceRequest)(nil),                 // 22: google.spanner.admin.instance.v1.UpdateInstanceRequest
	(*DeleteInstanceRequest)(nil),                 // 23: google.spanner.admin.instance.v1.DeleteInstanceRequest
	(*CreateInstanceMetadata)(nil),                // 24: google.spanner.admin.instance.v1.CreateInstanceMetadata
	(*UpdateInstanceMetadata)(nil),                // 25: google.spanner.admin.instance.v1.UpdateInstanceMetadata
	(*CreateInstanceConfigMetadata)(nil),          // 26: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata
	(*UpdateInstanceConfigMetadata)(nil),          // 27: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata
	(*InstancePartition)(nil),                     // 28: google.spanner.admin.instance.v1.InstancePartition
	(*CreateInstancePartitionMetadata)(nil),       // 29: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata
	(*CreateInstancePartitionRequest)(nil),        // 30: google.spanner.admin.instance.v1.CreateInstancePartitionRequest
	(*DeleteInstancePartitionRequest)(nil),        // 31: google.spanner.admin.instance.v1.DeleteInstancePartitionRequest
	(*GetInstancePartitionRequest)(nil),           // 32: google.spanner.admin.instance.v1.GetInstancePartitionRequest
	(*UpdateInstancePartitionRequest)(nil),        // 33: google.spanner.admin.instance.v1.UpdateInstancePartitionRequest
	(*UpdateInstancePartitionMetadata)(nil),       // 34: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata
	(*ListInstancePartitionsRequest)(nil),         // 35: google.spanner.admin.instance.v1.ListInstancePartitionsRequest
	(*ListInstancePartitionsResponse)(nil),        // 36: google.spanner.admin.instance.v1.ListInstancePartitionsResponse
	(*ListInstancePartitionOperationsRequest)(nil), // 37: google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest
	(*ListInstancePartitionOperationsResponse)(nil), // 38: google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse
	(*MoveInstanceRequest)(nil),                   // 39: google.spanner.admin.instance.v1.MoveInstanceRequest
	(*MoveInstanceResponse)(nil),                  // 40: google.spanner.admin.instance.v1.MoveInstanceResponse
	(*MoveInstanceMetadata)(nil),                  // 41: google.spanner.admin.instance.v1.MoveInstanceMetadata
	nil,                                           // 42: google.spanner.admin.instance.v1.InstanceConfig.LabelsEntry
	(*AutoscalingConfig_AutoscalingLimits)(nil),   // 43: google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits
	(*AutoscalingConfig_AutoscalingTargets)(nil),  // 44: google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets
	nil,                                           // 45: google.spanner.admin.instance.v1.Instance.LabelsEntry
	(*timestamppb.Timestamp)(nil),                 // 46: google.protobuf.Timestamp
	(*fieldmaskpb.FieldMask)(nil),                 // 47: google.protobuf.FieldMask
	(*longrunningpb.Operation)(nil),               // 48: google.longrunning.Operation
	(FulfillmentPeriod)(0),                        // 49: google.spanner.admin.instance.v1.FulfillmentPeriod
	(*OperationProgress)(nil),                     // 50: google.spanner.admin.instance.v1.OperationProgress
	(*iampb.SetIamPolicyRequest)(nil),             // 51: google.iam.v1.SetIamPolicyRequest
	(*iampb.GetIamPolicyRequest)(nil),             // 52: google.iam.v1.GetIamPolicyRequest
	(*iampb.TestIamPermissionsRequest)(nil),       // 53: google.iam.v1.TestIamPermissionsRequest
	(*emptypb.Empty)(nil),                         // 54: google.protobuf.Empty
	(*iampb.Policy)(nil),                          // 55: google.iam.v1.Policy
	(*iampb.TestIamPermissionsResponse)(nil),      // 56: google.iam.v1.TestIamPermissionsResponse
}
// depIdxs maps each type reference in the descriptor to an index in goTypes
// above. The slice is partitioned into sub-lists — field type_names, then
// extension extendees, extension type_names, method input types, and method
// output types — with the final five entries recording the sub-list
// boundaries, as interpreted by protoimpl.TypeBuilder.
var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_depIdxs = []int32{
	0,  // 0: google.spanner.admin.instance.v1.ReplicaInfo.type:type_name -> google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType
	1,  // 1: google.spanner.admin.instance.v1.InstanceConfig.config_type:type_name -> google.spanner.admin.instance.v1.InstanceConfig.Type
	6,  // 2: google.spanner.admin.instance.v1.InstanceConfig.replicas:type_name -> google.spanner.admin.instance.v1.ReplicaInfo
	6,  // 3: google.spanner.admin.instance.v1.InstanceConfig.optional_replicas:type_name -> google.spanner.admin.instance.v1.ReplicaInfo
	42, // 4: google.spanner.admin.instance.v1.InstanceConfig.labels:type_name -> google.spanner.admin.instance.v1.InstanceConfig.LabelsEntry
	2,  // 5: google.spanner.admin.instance.v1.InstanceConfig.state:type_name -> google.spanner.admin.instance.v1.InstanceConfig.State
	43, // 6: google.spanner.admin.instance.v1.AutoscalingConfig.autoscaling_limits:type_name -> google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits
	44, // 7: google.spanner.admin.instance.v1.AutoscalingConfig.autoscaling_targets:type_name -> google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets
	8,  // 8: google.spanner.admin.instance.v1.Instance.autoscaling_config:type_name -> google.spanner.admin.instance.v1.AutoscalingConfig
	3,  // 9: google.spanner.admin.instance.v1.Instance.state:type_name -> google.spanner.admin.instance.v1.Instance.State
	45, // 10: google.spanner.admin.instance.v1.Instance.labels:type_name -> google.spanner.admin.instance.v1.Instance.LabelsEntry
	46, // 11: google.spanner.admin.instance.v1.Instance.create_time:type_name -> google.protobuf.Timestamp
	46, // 12: google.spanner.admin.instance.v1.Instance.update_time:type_name -> google.protobuf.Timestamp
	4,  // 13: google.spanner.admin.instance.v1.Instance.edition:type_name -> google.spanner.admin.instance.v1.Instance.Edition
	7,  // 14: google.spanner.admin.instance.v1.ListInstanceConfigsResponse.instance_configs:type_name -> google.spanner.admin.instance.v1.InstanceConfig
	7,  // 15: google.spanner.admin.instance.v1.CreateInstanceConfigRequest.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
	7,  // 16: google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
	47, // 17: google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
	48, // 18: google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.operations:type_name -> google.longrunning.Operation
	47, // 19: google.spanner.admin.instance.v1.GetInstanceRequest.field_mask:type_name -> google.protobuf.FieldMask
	9,  // 20: google.spanner.admin.instance.v1.CreateInstanceRequest.instance:type_name -> google.spanner.admin.instance.v1.Instance
	46, // 21: google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline:type_name -> google.protobuf.Timestamp
	9,  // 22: google.spanner.admin.instance.v1.ListInstancesResponse.instances:type_name -> google.spanner.admin.instance.v1.Instance
	9,  // 23: google.spanner.admin.instance.v1.UpdateInstanceRequest.instance:type_name -> google.spanner.admin.instance.v1.Instance
	47, // 24: google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask:type_name -> google.protobuf.FieldMask
	9,  // 25: google.spanner.admin.instance.v1.CreateInstanceMetadata.instance:type_name -> google.spanner.admin.instance.v1.Instance
	46, // 26: google.spanner.admin.instance.v1.CreateInstanceMetadata.start_time:type_name -> google.protobuf.Timestamp
	46, // 27: google.spanner.admin.instance.v1.CreateInstanceMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	46, // 28: google.spanner.admin.instance.v1.CreateInstanceMetadata.end_time:type_name -> google.protobuf.Timestamp
	49, // 29: google.spanner.admin.instance.v1.CreateInstanceMetadata.expected_fulfillment_period:type_name -> google.spanner.admin.instance.v1.FulfillmentPeriod
	9,  // 30: google.spanner.admin.instance.v1.UpdateInstanceMetadata.instance:type_name -> google.spanner.admin.instance.v1.Instance
	46, // 31: google.spanner.admin.instance.v1.UpdateInstanceMetadata.start_time:type_name -> google.protobuf.Timestamp
	46, // 32: google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	46, // 33: google.spanner.admin.instance.v1.UpdateInstanceMetadata.end_time:type_name -> google.protobuf.Timestamp
	49, // 34: google.spanner.admin.instance.v1.UpdateInstanceMetadata.expected_fulfillment_period:type_name -> google.spanner.admin.instance.v1.FulfillmentPeriod
	7,  // 35: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
	50, // 36: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.progress:type_name -> google.spanner.admin.instance.v1.OperationProgress
	46, // 37: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	7,  // 38: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
	50, // 39: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.progress:type_name -> google.spanner.admin.instance.v1.OperationProgress
	46, // 40: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	5,  // 41: google.spanner.admin.instance.v1.InstancePartition.state:type_name -> google.spanner.admin.instance.v1.InstancePartition.State
	46, // 42: google.spanner.admin.instance.v1.InstancePartition.create_time:type_name -> google.protobuf.Timestamp
	46, // 43: google.spanner.admin.instance.v1.InstancePartition.update_time:type_name -> google.protobuf.Timestamp
	28, // 44: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
	46, // 45: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.start_time:type_name -> google.protobuf.Timestamp
	46, // 46: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	46, // 47: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.end_time:type_name -> google.protobuf.Timestamp
	28, // 48: google.spanner.admin.instance.v1.CreateInstancePartitionRequest.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
	28, // 49: google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
	47, // 50: google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask:type_name -> google.protobuf.FieldMask
	28, // 51: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
	46, // 52: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.start_time:type_name -> google.protobuf.Timestamp
	46, // 53: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	46, // 54: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.end_time:type_name -> google.protobuf.Timestamp
	46, // 55: google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline:type_name -> google.protobuf.Timestamp
	28, // 56: google.spanner.admin.instance.v1.ListInstancePartitionsResponse.instance_partitions:type_name -> google.spanner.admin.instance.v1.InstancePartition
	46, // 57: google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline:type_name -> google.protobuf.Timestamp
	48, // 58: google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.operations:type_name -> google.longrunning.Operation
	50, // 59: google.spanner.admin.instance.v1.MoveInstanceMetadata.progress:type_name -> google.spanner.admin.instance.v1.OperationProgress
	46, // 60: google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time:type_name -> google.protobuf.Timestamp
	10, // 61: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs:input_type -> google.spanner.admin.instance.v1.ListInstanceConfigsRequest
	12, // 62: google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig:input_type -> google.spanner.admin.instance.v1.GetInstanceConfigRequest
	13, // 63: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig:input_type -> google.spanner.admin.instance.v1.CreateInstanceConfigRequest
	14, // 64: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig:input_type -> google.spanner.admin.instance.v1.UpdateInstanceConfigRequest
	15, // 65: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig:input_type -> google.spanner.admin.instance.v1.DeleteInstanceConfigRequest
	16, // 66: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations:input_type -> google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest
	20, // 67: google.spanner.admin.instance.v1.InstanceAdmin.ListInstances:input_type -> google.spanner.admin.instance.v1.ListInstancesRequest
	35, // 68: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions:input_type -> google.spanner.admin.instance.v1.ListInstancePartitionsRequest
	18, // 69: google.spanner.admin.instance.v1.InstanceAdmin.GetInstance:input_type -> google.spanner.admin.instance.v1.GetInstanceRequest
	19, // 70: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance:input_type -> google.spanner.admin.instance.v1.CreateInstanceRequest
	22, // 71: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance:input_type -> google.spanner.admin.instance.v1.UpdateInstanceRequest
	23, // 72: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance:input_type -> google.spanner.admin.instance.v1.DeleteInstanceRequest
	51, // 73: google.spanner.admin.instance.v1.InstanceAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
	52, // 74: google.spanner.admin.instance.v1.InstanceAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
	53, // 75: google.spanner.admin.instance.v1.InstanceAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
	32, // 76: google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition:input_type -> google.spanner.admin.instance.v1.GetInstancePartitionRequest
	30, // 77: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition:input_type -> google.spanner.admin.instance.v1.CreateInstancePartitionRequest
	31, // 78: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition:input_type -> google.spanner.admin.instance.v1.DeleteInstancePartitionRequest
	33, // 79: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition:input_type -> google.spanner.admin.instance.v1.UpdateInstancePartitionRequest
	37, // 80: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations:input_type -> google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest
	39, // 81: google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance:input_type -> google.spanner.admin.instance.v1.MoveInstanceRequest
	11, // 82: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs:output_type -> google.spanner.admin.instance.v1.ListInstanceConfigsResponse
	7,  // 83: google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig:output_type -> google.spanner.admin.instance.v1.InstanceConfig
	48, // 84: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig:output_type -> google.longrunning.Operation
	48, // 85: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig:output_type -> google.longrunning.Operation
	54, // 86: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig:output_type -> google.protobuf.Empty
	17, // 87: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations:output_type -> google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse
	21, // 88: google.spanner.admin.instance.v1.InstanceAdmin.ListInstances:output_type -> google.spanner.admin.instance.v1.ListInstancesResponse
	36, // 89: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions:output_type -> google.spanner.admin.instance.v1.ListInstancePartitionsResponse
	9,  // 90: google.spanner.admin.instance.v1.InstanceAdmin.GetInstance:output_type -> google.spanner.admin.instance.v1.Instance
	48, // 91: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance:output_type -> google.longrunning.Operation
	48, // 92: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance:output_type -> google.longrunning.Operation
	54, // 93: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance:output_type -> google.protobuf.Empty
	55, // 94: google.spanner.admin.instance.v1.InstanceAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy
	55, // 95: google.spanner.admin.instance.v1.InstanceAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy
	56, // 96: google.spanner.admin.instance.v1.InstanceAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
	28, // 97: google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition:output_type -> google.spanner.admin.instance.v1.InstancePartition
	48, // 98: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition:output_type -> google.longrunning.Operation
	54, // 99: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition:output_type -> google.protobuf.Empty
	48, // 100: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition:output_type -> google.longrunning.Operation
	38, // 101: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations:output_type -> google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse
	48, // 102: google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance:output_type -> google.longrunning.Operation
	82, // [82:103] is the sub-list for method output_type
	61, // [61:82] is the sub-list for method input_type
	61, // [61:61] is the sub-list for extension type_name
	61, // [61:61] is the sub-list for extension extendee
	0,  // [0:61] is the sub-list for field type_name
}
+
+func init() { file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_init() }
// file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_init
// builds this file's type descriptors and registers them with the protobuf
// runtime. It is idempotent: once File_..._proto has been set, subsequent
// calls return immediately.
func file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_init() {
	if File_google_spanner_admin_instance_v1_spanner_instance_admin_proto != nil {
		return
	}
	// This file's descriptor references types from common.proto, so that
	// file must be initialized first.
	file_google_spanner_admin_instance_v1_common_proto_init()
	// Without unsafe access, the runtime needs exporter funcs to reach the
	// unexported state/sizeCache/unknownFields fields of each message.
	// Message indices 36 and 39 are synthetic map-entry types (LabelsEntry)
	// and require no exporter.
	if !protoimpl.UnsafeEnabled {
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[0].Exporter = func(v any, i int) any {
			switch v := v.(*ReplicaInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[1].Exporter = func(v any, i int) any {
			switch v := v.(*InstanceConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[2].Exporter = func(v any, i int) any {
			switch v := v.(*AutoscalingConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[3].Exporter = func(v any, i int) any {
			switch v := v.(*Instance); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[4].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstanceConfigsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[5].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstanceConfigsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[6].Exporter = func(v any, i int) any {
			switch v := v.(*GetInstanceConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[7].Exporter = func(v any, i int) any {
			switch v := v.(*CreateInstanceConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[8].Exporter = func(v any, i int) any {
			switch v := v.(*UpdateInstanceConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[9].Exporter = func(v any, i int) any {
			switch v := v.(*DeleteInstanceConfigRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[10].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstanceConfigOperationsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[11].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstanceConfigOperationsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[12].Exporter = func(v any, i int) any {
			switch v := v.(*GetInstanceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[13].Exporter = func(v any, i int) any {
			switch v := v.(*CreateInstanceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[14].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstancesRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[15].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstancesResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[16].Exporter = func(v any, i int) any {
			switch v := v.(*UpdateInstanceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[17].Exporter = func(v any, i int) any {
			switch v := v.(*DeleteInstanceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[18].Exporter = func(v any, i int) any {
			switch v := v.(*CreateInstanceMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[19].Exporter = func(v any, i int) any {
			switch v := v.(*UpdateInstanceMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[20].Exporter = func(v any, i int) any {
			switch v := v.(*CreateInstanceConfigMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[21].Exporter = func(v any, i int) any {
			switch v := v.(*UpdateInstanceConfigMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22].Exporter = func(v any, i int) any {
			switch v := v.(*InstancePartition); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[23].Exporter = func(v any, i int) any {
			switch v := v.(*CreateInstancePartitionMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[24].Exporter = func(v any, i int) any {
			switch v := v.(*CreateInstancePartitionRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[25].Exporter = func(v any, i int) any {
			switch v := v.(*DeleteInstancePartitionRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[26].Exporter = func(v any, i int) any {
			switch v := v.(*GetInstancePartitionRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[27].Exporter = func(v any, i int) any {
			switch v := v.(*UpdateInstancePartitionRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[28].Exporter = func(v any, i int) any {
			switch v := v.(*UpdateInstancePartitionMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[29].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstancePartitionsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[30].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstancePartitionsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[31].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstancePartitionOperationsRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[32].Exporter = func(v any, i int) any {
			switch v := v.(*ListInstancePartitionOperationsResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[33].Exporter = func(v any, i int) any {
			switch v := v.(*MoveInstanceRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[34].Exporter = func(v any, i int) any {
			switch v := v.(*MoveInstanceResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[35].Exporter = func(v any, i int) any {
			switch v := v.(*MoveInstanceMetadata); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37].Exporter = func(v any, i int) any {
			switch v := v.(*AutoscalingConfig_AutoscalingLimits); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[38].Exporter = func(v any, i int) any {
			switch v := v.(*AutoscalingConfig_AutoscalingTargets); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Wrapper types for the oneof fields of InstancePartition (index 22)
	// and AutoscalingConfig.AutoscalingLimits (index 37).
	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22].OneofWrappers = []any{
		(*InstancePartition_NodeCount)(nil),
		(*InstancePartition_ProcessingUnits)(nil),
	}
	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37].OneofWrappers = []any{
		(*AutoscalingConfig_AutoscalingLimits_MinNodes)(nil),
		(*AutoscalingConfig_AutoscalingLimits_MinProcessingUnits)(nil),
		(*AutoscalingConfig_AutoscalingLimits_MaxNodes)(nil),
		(*AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits)(nil),
	}
	// The zero-size type x only supplies this package's import path to the
	// builder via reflection.
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc,
			NumEnums:      6,
			NumMessages:   40,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_goTypes,
		DependencyIndexes: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_depIdxs,
		EnumInfos:         file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes,
		MessageInfos:      file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes,
	}.Build()
	File_google_spanner_admin_instance_v1_spanner_instance_admin_proto = out.File
	// Release the builder inputs so they can be garbage collected; the
	// registered File retains everything the runtime still needs.
	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc = nil
	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_goTypes = nil
	file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_depIdxs = nil
}
+
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// (SupportPackageIsVersion6 is declared by google.golang.org/grpc.)
const _ = grpc.SupportPackageIsVersion6
+
// InstanceAdminClient is the client API for InstanceAdmin service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// NOTE(review): these are machine-generated gRPC bindings; regenerate from
// the service .proto rather than editing this interface by hand.
type InstanceAdminClient interface {
	// Lists the supported instance configurations for a given project.
	ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error)
	// Gets information about a particular instance configuration.
	GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error)
	// Creates an instance configuration and begins preparing it to be used. The
	// returned [long-running operation][google.longrunning.Operation]
	// can be used to track the progress of preparing the new
	// instance configuration. The instance configuration name is assigned by the
	// caller. If the named instance configuration already exists,
	// `CreateInstanceConfig` returns `ALREADY_EXISTS`.
	//
	// Immediately after the request returns:
	//
	//   - The instance configuration is readable via the API, with all requested
	//     attributes. The instance configuration's
	//     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
	//     field is set to true. Its state is `CREATING`.
	//
	// While the operation is pending:
	//
	//   - Cancelling the operation renders the instance configuration immediately
	//     unreadable via the API.
	//   - Except for deleting the creating resource, all other attempts to modify
	//     the instance configuration are rejected.
	//
	// Upon completion of the returned operation:
	//
	//   - Instances can be created using the instance configuration.
	//   - The instance configuration's
	//     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
	//     field becomes false. Its state becomes `READY`.
	//
	// The returned [long-running operation][google.longrunning.Operation] will
	// have a name of the format
	// `<instance_config_name>/operations/<operation_id>` and can be used to track
	// creation of the instance configuration. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
	// successful.
	//
	// Authorization requires `spanner.instanceConfigs.create` permission on
	// the resource
	// [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
	CreateInstanceConfig(ctx context.Context, in *CreateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
	// Updates an instance configuration. The returned
	// [long-running operation][google.longrunning.Operation] can be used to track
	// the progress of updating the instance. If the named instance configuration
	// does not exist, returns `NOT_FOUND`.
	//
	// Only user-managed configurations can be updated.
	//
	// Immediately after the request returns:
	//
	//   - The instance configuration's
	//     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
	//     field is set to true.
	//
	// While the operation is pending:
	//
	//   - Cancelling the operation sets its metadata's
	//     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
	//     The operation is guaranteed to succeed at undoing all changes, after
	//     which point it terminates with a `CANCELLED` status.
	//   - All other attempts to modify the instance configuration are rejected.
	//   - Reading the instance configuration via the API continues to give the
	//     pre-request values.
	//
	// Upon completion of the returned operation:
	//
	//   - Creating instances using the instance configuration uses the new
	//     values.
	//   - The new values of the instance configuration are readable via the API.
	//   - The instance configuration's
	//     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
	//     field becomes false.
	//
	// The returned [long-running operation][google.longrunning.Operation] will
	// have a name of the format
	// `<instance_config_name>/operations/<operation_id>` and can be used to track
	// the instance configuration modification. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
	// successful.
	//
	// Authorization requires `spanner.instanceConfigs.update` permission on
	// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
	UpdateInstanceConfig(ctx context.Context, in *UpdateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
	// Deletes the instance configuration. Deletion is only allowed when no
	// instances are using the configuration. If any instances are using
	// the configuration, returns `FAILED_PRECONDITION`.
	//
	// Only user-managed configurations can be deleted.
	//
	// Authorization requires `spanner.instanceConfigs.delete` permission on
	// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
	DeleteInstanceConfig(ctx context.Context, in *DeleteInstanceConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// Lists the user-managed instance configuration [long-running
	// operations][google.longrunning.Operation] in the given project. An instance
	// configuration operation has a name of the form
	// `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
	// The long-running operation
	// [metadata][google.longrunning.Operation.metadata] field type
	// `metadata.type_url` describes the type of the metadata. Operations returned
	// include those that have completed/failed/canceled within the last 7 days,
	// and pending operations. Operations returned are ordered by
	// `operation.metadata.value.start_time` in descending order starting
	// from the most recently started operation.
	ListInstanceConfigOperations(ctx context.Context, in *ListInstanceConfigOperationsRequest, opts ...grpc.CallOption) (*ListInstanceConfigOperationsResponse, error)
	// Lists all instances in the given project.
	ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error)
	// Lists all instance partitions for the given instance.
	ListInstancePartitions(ctx context.Context, in *ListInstancePartitionsRequest, opts ...grpc.CallOption) (*ListInstancePartitionsResponse, error)
	// Gets information about a particular instance.
	GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error)
	// Creates an instance and begins preparing it to begin serving. The
	// returned [long-running operation][google.longrunning.Operation]
	// can be used to track the progress of preparing the new
	// instance. The instance name is assigned by the caller. If the
	// named instance already exists, `CreateInstance` returns
	// `ALREADY_EXISTS`.
	//
	// Immediately upon completion of this request:
	//
	//   - The instance is readable via the API, with all requested attributes
	//     but no allocated resources. Its state is `CREATING`.
	//
	// Until completion of the returned operation:
	//
	//   - Cancelling the operation renders the instance immediately unreadable
	//     via the API.
	//   - The instance can be deleted.
	//   - All other attempts to modify the instance are rejected.
	//
	// Upon completion of the returned operation:
	//
	//   - Billing for all successfully-allocated resources begins (some types
	//     may have lower than the requested levels).
	//   - Databases can be created in the instance.
	//   - The instance's allocated resource levels are readable via the API.
	//   - The instance's state becomes `READY`.
	//
	// The returned [long-running operation][google.longrunning.Operation] will
	// have a name of the format `<instance_name>/operations/<operation_id>` and
	// can be used to track creation of the instance. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [Instance][google.spanner.admin.instance.v1.Instance], if successful.
	CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
	// Updates an instance, and begins allocating or releasing resources
	// as requested. The returned [long-running
	// operation][google.longrunning.Operation] can be used to track the
	// progress of updating the instance. If the named instance does not
	// exist, returns `NOT_FOUND`.
	//
	// Immediately upon completion of this request:
	//
	//   - For resource types for which a decrease in the instance's allocation
	//     has been requested, billing is based on the newly-requested level.
	//
	// Until completion of the returned operation:
	//
	//   - Cancelling the operation sets its metadata's
	//     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
	//     and begins restoring resources to their pre-request values. The
	//     operation is guaranteed to succeed at undoing all resource changes,
	//     after which point it terminates with a `CANCELLED` status.
	//   - All other attempts to modify the instance are rejected.
	//   - Reading the instance via the API continues to give the pre-request
	//     resource levels.
	//
	// Upon completion of the returned operation:
	//
	//   - Billing begins for all successfully-allocated resources (some types
	//     may have lower than the requested levels).
	//   - All newly-reserved resources are available for serving the instance's
	//     tables.
	//   - The instance's new resource levels are readable via the API.
	//
	// The returned [long-running operation][google.longrunning.Operation] will
	// have a name of the format `<instance_name>/operations/<operation_id>` and
	// can be used to track the instance modification. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [Instance][google.spanner.admin.instance.v1.Instance], if successful.
	//
	// Authorization requires `spanner.instances.update` permission on
	// the resource [name][google.spanner.admin.instance.v1.Instance.name].
	UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
	// Deletes an instance.
	//
	// Immediately upon completion of the request:
	//
	//   - Billing ceases for all of the instance's reserved resources.
	//
	// Soon afterward:
	//
	//   - The instance and *all of its databases* immediately and
	//     irrevocably disappear from the API. All data in the databases
	//     is permanently deleted.
	DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// Sets the access control policy on an instance resource. Replaces any
	// existing policy.
	//
	// Authorization requires `spanner.instances.setIamPolicy` on
	// [resource][google.iam.v1.SetIamPolicyRequest.resource].
	SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
	// Gets the access control policy for an instance resource. Returns an empty
	// policy if an instance exists but does not have a policy set.
	//
	// Authorization requires `spanner.instances.getIamPolicy` on
	// [resource][google.iam.v1.GetIamPolicyRequest.resource].
	GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
	// Returns permissions that the caller has on the specified instance resource.
	//
	// Attempting this RPC on a non-existent Cloud Spanner instance resource will
	// result in a NOT_FOUND error if the user has `spanner.instances.list`
	// permission on the containing Google Cloud Project. Otherwise returns an
	// empty set of permissions.
	TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
	// Gets information about a particular instance partition.
	GetInstancePartition(ctx context.Context, in *GetInstancePartitionRequest, opts ...grpc.CallOption) (*InstancePartition, error)
	// Creates an instance partition and begins preparing it to be used. The
	// returned [long-running operation][google.longrunning.Operation]
	// can be used to track the progress of preparing the new instance partition.
	// The instance partition name is assigned by the caller. If the named
	// instance partition already exists, `CreateInstancePartition` returns
	// `ALREADY_EXISTS`.
	//
	// Immediately upon completion of this request:
	//
	//   - The instance partition is readable via the API, with all requested
	//     attributes but no allocated resources. Its state is `CREATING`.
	//
	// Until completion of the returned operation:
	//
	//   - Cancelling the operation renders the instance partition immediately
	//     unreadable via the API.
	//   - The instance partition can be deleted.
	//   - All other attempts to modify the instance partition are rejected.
	//
	// Upon completion of the returned operation:
	//
	//   - Billing for all successfully-allocated resources begins (some types
	//     may have lower than the requested levels).
	//   - Databases can start using this instance partition.
	//   - The instance partition's allocated resource levels are readable via the
	//     API.
	//   - The instance partition's state becomes `READY`.
	//
	// The returned [long-running operation][google.longrunning.Operation] will
	// have a name of the format
	// `<instance_partition_name>/operations/<operation_id>` and can be used to
	// track creation of the instance partition. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
	// successful.
	CreateInstancePartition(ctx context.Context, in *CreateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
	// Deletes an existing instance partition. Requires that the
	// instance partition is not used by any database or backup and is not the
	// default instance partition of an instance.
	//
	// Authorization requires `spanner.instancePartitions.delete` permission on
	// the resource
	// [name][google.spanner.admin.instance.v1.InstancePartition.name].
	DeleteInstancePartition(ctx context.Context, in *DeleteInstancePartitionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
	// Updates an instance partition, and begins allocating or releasing resources
	// as requested. The returned [long-running
	// operation][google.longrunning.Operation] can be used to track the
	// progress of updating the instance partition. If the named instance
	// partition does not exist, returns `NOT_FOUND`.
	//
	// Immediately upon completion of this request:
	//
	//   - For resource types for which a decrease in the instance partition's
	//     allocation has been requested, billing is based on the newly-requested
	//     level.
	//
	// Until completion of the returned operation:
	//
	//   - Cancelling the operation sets its metadata's
	//     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
	//     and begins restoring resources to their pre-request values. The
	//     operation is guaranteed to succeed at undoing all resource changes,
	//     after which point it terminates with a `CANCELLED` status.
	//   - All other attempts to modify the instance partition are rejected.
	//   - Reading the instance partition via the API continues to give the
	//     pre-request resource levels.
	//
	// Upon completion of the returned operation:
	//
	//   - Billing begins for all successfully-allocated resources (some types
	//     may have lower than the requested levels).
	//   - All newly-reserved resources are available for serving the instance
	//     partition's tables.
	//   - The instance partition's new resource levels are readable via the API.
	//
	// The returned [long-running operation][google.longrunning.Operation] will
	// have a name of the format
	// `<instance_partition_name>/operations/<operation_id>` and can be used to
	// track the instance partition modification. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
	// successful.
	//
	// Authorization requires `spanner.instancePartitions.update` permission on
	// the resource
	// [name][google.spanner.admin.instance.v1.InstancePartition.name].
	UpdateInstancePartition(ctx context.Context, in *UpdateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
	// Lists instance partition [long-running
	// operations][google.longrunning.Operation] in the given instance.
	// An instance partition operation has a name of the form
	// `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
	// The long-running operation
	// [metadata][google.longrunning.Operation.metadata] field type
	// `metadata.type_url` describes the type of the metadata. Operations returned
	// include those that have completed/failed/canceled within the last 7 days,
	// and pending operations. Operations returned are ordered by
	// `operation.metadata.value.start_time` in descending order starting from the
	// most recently started operation.
	//
	// Authorization requires `spanner.instancePartitionOperations.list`
	// permission on the resource
	// [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
	ListInstancePartitionOperations(ctx context.Context, in *ListInstancePartitionOperationsRequest, opts ...grpc.CallOption) (*ListInstancePartitionOperationsResponse, error)
	// Moves an instance to the target instance configuration. You can use the
	// returned [long-running operation][google.longrunning.Operation] to track
	// the progress of moving the instance.
	//
	// `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
	// the following criteria:
	//
	//   - Is undergoing a move to a different instance configuration
	//   - Has backups
	//   - Has an ongoing update
	//   - Contains any CMEK-enabled databases
	//   - Is a free trial instance
	//
	// While the operation is pending:
	//
	//   - All other attempts to modify the instance, including changes to its
	//     compute capacity, are rejected.
	//
	//   - The following database and backup admin operations are rejected:
	//
	//   - `DatabaseAdmin.CreateDatabase`
	//
	//   - `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
	//     specified in the request.)
	//
	//   - `DatabaseAdmin.RestoreDatabase`
	//
	//   - `DatabaseAdmin.CreateBackup`
	//
	//   - `DatabaseAdmin.CopyBackup`
	//
	//   - Both the source and target instance configurations are subject to
	//     hourly compute and storage charges.
	//
	//   - The instance might experience higher read-write latencies and a higher
	//     transaction abort rate. However, moving an instance doesn't cause any
	//     downtime.
	//
	// The returned [long-running operation][google.longrunning.Operation] has
	// a name of the format
	// `<instance_name>/operations/<operation_id>` and can be used to track
	// the move instance operation. The
	// [metadata][google.longrunning.Operation.metadata] field type is
	// [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
	// The [response][google.longrunning.Operation.response] field type is
	// [Instance][google.spanner.admin.instance.v1.Instance],
	// if successful.
	// Cancelling the operation sets its metadata's
	// [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
	// Cancellation is not immediate because it involves moving any data
	// previously moved to the target instance configuration back to the original
	// instance configuration. You can use this operation to track the progress of
	// the cancellation. Upon successful completion of the cancellation, the
	// operation terminates with `CANCELLED` status.
	//
	// If not cancelled, upon completion of the returned operation:
	//
	//   - The instance successfully moves to the target instance
	//     configuration.
	//   - You are billed for compute and storage in target instance
	//     configuration.
	//
	// Authorization requires the `spanner.instances.update` permission on
	// the resource [instance][google.spanner.admin.instance.v1.Instance].
	//
	// For more details, see
	// [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
	MoveInstance(ctx context.Context, in *MoveInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}
+
// instanceAdminClient implements InstanceAdminClient by issuing unary RPCs
// over a gRPC client connection.
type instanceAdminClient struct {
	cc grpc.ClientConnInterface // connection used for all Invoke calls
}
+
+func NewInstanceAdminClient(cc grpc.ClientConnInterface) InstanceAdminClient {
+ return &instanceAdminClient{cc}
+}
+
+func (c *instanceAdminClient) ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error) {
+ out := new(ListInstanceConfigsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error) {
+ out := new(InstanceConfig)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) CreateInstanceConfig(ctx context.Context, in *CreateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) UpdateInstanceConfig(ctx context.Context, in *UpdateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) DeleteInstanceConfig(ctx context.Context, in *DeleteInstanceConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) ListInstanceConfigOperations(ctx context.Context, in *ListInstanceConfigOperationsRequest, opts ...grpc.CallOption) (*ListInstanceConfigOperationsResponse, error) {
+ out := new(ListInstanceConfigOperationsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) {
+ out := new(ListInstancesResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) ListInstancePartitions(ctx context.Context, in *ListInstancePartitionsRequest, opts ...grpc.CallOption) (*ListInstancePartitionsResponse, error) {
+ out := new(ListInstancePartitionsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) {
+ out := new(Instance)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ out := new(iampb.TestIamPermissionsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) GetInstancePartition(ctx context.Context, in *GetInstancePartitionRequest, opts ...grpc.CallOption) (*InstancePartition, error) {
+ out := new(InstancePartition)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) CreateInstancePartition(ctx context.Context, in *CreateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) DeleteInstancePartition(ctx context.Context, in *DeleteInstancePartitionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) UpdateInstancePartition(ctx context.Context, in *UpdateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *instanceAdminClient) ListInstancePartitionOperations(ctx context.Context, in *ListInstancePartitionOperationsRequest, opts ...grpc.CallOption) (*ListInstancePartitionOperationsResponse, error) {
+ out := new(ListInstancePartitionOperationsResponse)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MoveInstance invokes the InstanceAdmin.MoveInstance RPC and returns the
+// long-running operation that tracks the move.
+func (c *instanceAdminClient) MoveInstance(ctx context.Context, in *MoveInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
+ out := new(longrunningpb.Operation)
+ err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// InstanceAdminServer is the server API for InstanceAdmin service.
+// Implementations are registered with a gRPC server via
+// RegisterInstanceAdminServer; embed UnimplementedInstanceAdminServer
+// for forward compatibility with newly added RPCs.
+type InstanceAdminServer interface {
+ // Lists the supported instance configurations for a given project.
+ ListInstanceConfigs(context.Context, *ListInstanceConfigsRequest) (*ListInstanceConfigsResponse, error)
+ // Gets information about a particular instance configuration.
+ GetInstanceConfig(context.Context, *GetInstanceConfigRequest) (*InstanceConfig, error)
+ // Creates an instance configuration and begins preparing it to be used. The
+ // returned [long-running operation][google.longrunning.Operation]
+ // can be used to track the progress of preparing the new
+ // instance configuration. The instance configuration name is assigned by the
+ // caller. If the named instance configuration already exists,
+ // `CreateInstanceConfig` returns `ALREADY_EXISTS`.
+ //
+ // Immediately after the request returns:
+ //
+ //   - The instance configuration is readable via the API, with all requested
+ //     attributes. The instance configuration's
+ //     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ //     field is set to true. Its state is `CREATING`.
+ //
+ // While the operation is pending:
+ //
+ //   - Cancelling the operation renders the instance configuration immediately
+ //     unreadable via the API.
+ //   - Except for deleting the creating resource, all other attempts to modify
+ //     the instance configuration are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+ //   - Instances can be created using the instance configuration.
+ //   - The instance configuration's
+ //     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ //     field becomes false. Its state becomes `READY`.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `<instance_config_name>/operations/<operation_id>` and can be used to track
+ // creation of the instance configuration. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
+ // successful.
+ //
+ // Authorization requires `spanner.instanceConfigs.create` permission on
+ // the resource
+ // [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
+ CreateInstanceConfig(context.Context, *CreateInstanceConfigRequest) (*longrunningpb.Operation, error)
+ // Updates an instance configuration. The returned
+ // [long-running operation][google.longrunning.Operation] can be used to track
+ // the progress of updating the instance. If the named instance configuration
+ // does not exist, returns `NOT_FOUND`.
+ //
+ // Only user-managed configurations can be updated.
+ //
+ // Immediately after the request returns:
+ //
+ //   - The instance configuration's
+ //     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ //     field is set to true.
+ //
+ // While the operation is pending:
+ //
+ //   - Cancelling the operation sets its metadata's
+ //     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
+ //     The operation is guaranteed to succeed at undoing all changes, after
+ //     which point it terminates with a `CANCELLED` status.
+ //   - All other attempts to modify the instance configuration are rejected.
+ //   - Reading the instance configuration via the API continues to give the
+ //     pre-request values.
+ //
+ // Upon completion of the returned operation:
+ //
+ //   - Creating instances using the instance configuration uses the new
+ //     values.
+ //   - The new values of the instance configuration are readable via the API.
+ //   - The instance configuration's
+ //     [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
+ //     field becomes false.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `<instance_config_name>/operations/<operation_id>` and can be used to track
+ // the instance configuration modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
+ // successful.
+ //
+ // Authorization requires `spanner.instanceConfigs.update` permission on
+ // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
+ UpdateInstanceConfig(context.Context, *UpdateInstanceConfigRequest) (*longrunningpb.Operation, error)
+ // Deletes the instance configuration. Deletion is only allowed when no
+ // instances are using the configuration. If any instances are using
+ // the configuration, returns `FAILED_PRECONDITION`.
+ //
+ // Only user-managed configurations can be deleted.
+ //
+ // Authorization requires `spanner.instanceConfigs.delete` permission on
+ // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
+ DeleteInstanceConfig(context.Context, *DeleteInstanceConfigRequest) (*emptypb.Empty, error)
+ // Lists the user-managed instance configuration [long-running
+ // operations][google.longrunning.Operation] in the given project. An instance
+ // configuration operation has a name of the form
+ // `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.start_time` in descending order starting
+ // from the most recently started operation.
+ ListInstanceConfigOperations(context.Context, *ListInstanceConfigOperationsRequest) (*ListInstanceConfigOperationsResponse, error)
+ // Lists all instances in the given project.
+ ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error)
+ // Lists all instance partitions for the given instance.
+ ListInstancePartitions(context.Context, *ListInstancePartitionsRequest) (*ListInstancePartitionsResponse, error)
+ // Gets information about a particular instance.
+ GetInstance(context.Context, *GetInstanceRequest) (*Instance, error)
+ // Creates an instance and begins preparing it to begin serving. The
+ // returned [long-running operation][google.longrunning.Operation]
+ // can be used to track the progress of preparing the new
+ // instance. The instance name is assigned by the caller. If the
+ // named instance already exists, `CreateInstance` returns
+ // `ALREADY_EXISTS`.
+ //
+ // Immediately upon completion of this request:
+ //
+ //   - The instance is readable via the API, with all requested attributes
+ //     but no allocated resources. Its state is `CREATING`.
+ //
+ // Until completion of the returned operation:
+ //
+ //   - Cancelling the operation renders the instance immediately unreadable
+ //     via the API.
+ //   - The instance can be deleted.
+ //   - All other attempts to modify the instance are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+ //   - Billing for all successfully-allocated resources begins (some types
+ //     may have lower than the requested levels).
+ //   - Databases can be created in the instance.
+ //   - The instance's allocated resource levels are readable via the API.
+ //   - The instance's state becomes `READY`.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<instance_name>/operations/<operation_id>` and
+ // can be used to track creation of the instance. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+ CreateInstance(context.Context, *CreateInstanceRequest) (*longrunningpb.Operation, error)
+ // Updates an instance, and begins allocating or releasing resources
+ // as requested. The returned [long-running
+ // operation][google.longrunning.Operation] can be used to track the
+ // progress of updating the instance. If the named instance does not
+ // exist, returns `NOT_FOUND`.
+ //
+ // Immediately upon completion of this request:
+ //
+ //   - For resource types for which a decrease in the instance's allocation
+ //     has been requested, billing is based on the newly-requested level.
+ //
+ // Until completion of the returned operation:
+ //
+ //   - Cancelling the operation sets its metadata's
+ //     [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
+ //     and begins restoring resources to their pre-request values. The
+ //     operation is guaranteed to succeed at undoing all resource changes,
+ //     after which point it terminates with a `CANCELLED` status.
+ //   - All other attempts to modify the instance are rejected.
+ //   - Reading the instance via the API continues to give the pre-request
+ //     resource levels.
+ //
+ // Upon completion of the returned operation:
+ //
+ //   - Billing begins for all successfully-allocated resources (some types
+ //     may have lower than the requested levels).
+ //   - All newly-reserved resources are available for serving the instance's
+ //     tables.
+ //   - The instance's new resource levels are readable via the API.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format `<instance_name>/operations/<operation_id>` and
+ // can be used to track the instance modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
+ //
+ // Authorization requires `spanner.instances.update` permission on
+ // the resource [name][google.spanner.admin.instance.v1.Instance.name].
+ UpdateInstance(context.Context, *UpdateInstanceRequest) (*longrunningpb.Operation, error)
+ // Deletes an instance.
+ //
+ // Immediately upon completion of the request:
+ //
+ //   - Billing ceases for all of the instance's reserved resources.
+ //
+ // Soon afterward:
+ //
+ //   - The instance and *all of its databases* immediately and
+ //     irrevocably disappear from the API. All data in the databases
+ //     is permanently deleted.
+ DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error)
+ // Sets the access control policy on an instance resource. Replaces any
+ // existing policy.
+ //
+ // Authorization requires `spanner.instances.setIamPolicy` on
+ // [resource][google.iam.v1.SetIamPolicyRequest.resource].
+ SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
+ // Gets the access control policy for an instance resource. Returns an empty
+ // policy if an instance exists but does not have a policy set.
+ //
+ // Authorization requires `spanner.instances.getIamPolicy` on
+ // [resource][google.iam.v1.GetIamPolicyRequest.resource].
+ GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error)
+ // Returns permissions that the caller has on the specified instance resource.
+ //
+ // Attempting this RPC on a non-existent Cloud Spanner instance resource will
+ // result in a NOT_FOUND error if the user has `spanner.instances.list`
+ // permission on the containing Google Cloud Project. Otherwise returns an
+ // empty set of permissions.
+ TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
+ // Gets information about a particular instance partition.
+ GetInstancePartition(context.Context, *GetInstancePartitionRequest) (*InstancePartition, error)
+ // Creates an instance partition and begins preparing it to be used. The
+ // returned [long-running operation][google.longrunning.Operation]
+ // can be used to track the progress of preparing the new instance partition.
+ // The instance partition name is assigned by the caller. If the named
+ // instance partition already exists, `CreateInstancePartition` returns
+ // `ALREADY_EXISTS`.
+ //
+ // Immediately upon completion of this request:
+ //
+ //   - The instance partition is readable via the API, with all requested
+ //     attributes but no allocated resources. Its state is `CREATING`.
+ //
+ // Until completion of the returned operation:
+ //
+ //   - Cancelling the operation renders the instance partition immediately
+ //     unreadable via the API.
+ //   - The instance partition can be deleted.
+ //   - All other attempts to modify the instance partition are rejected.
+ //
+ // Upon completion of the returned operation:
+ //
+ //   - Billing for all successfully-allocated resources begins (some types
+ //     may have lower than the requested levels).
+ //   - Databases can start using this instance partition.
+ //   - The instance partition's allocated resource levels are readable via the
+ //     API.
+ //   - The instance partition's state becomes `READY`.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `<instance_partition_name>/operations/<operation_id>` and can be used to
+ // track creation of the instance partition. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
+ // successful.
+ CreateInstancePartition(context.Context, *CreateInstancePartitionRequest) (*longrunningpb.Operation, error)
+ // Deletes an existing instance partition. Requires that the
+ // instance partition is not used by any database or backup and is not the
+ // default instance partition of an instance.
+ //
+ // Authorization requires `spanner.instancePartitions.delete` permission on
+ // the resource
+ // [name][google.spanner.admin.instance.v1.InstancePartition.name].
+ DeleteInstancePartition(context.Context, *DeleteInstancePartitionRequest) (*emptypb.Empty, error)
+ // Updates an instance partition, and begins allocating or releasing resources
+ // as requested. The returned [long-running
+ // operation][google.longrunning.Operation] can be used to track the
+ // progress of updating the instance partition. If the named instance
+ // partition does not exist, returns `NOT_FOUND`.
+ //
+ // Immediately upon completion of this request:
+ //
+ //   - For resource types for which a decrease in the instance partition's
+ //     allocation has been requested, billing is based on the newly-requested
+ //     level.
+ //
+ // Until completion of the returned operation:
+ //
+ //   - Cancelling the operation sets its metadata's
+ //     [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
+ //     and begins restoring resources to their pre-request values. The
+ //     operation is guaranteed to succeed at undoing all resource changes,
+ //     after which point it terminates with a `CANCELLED` status.
+ //   - All other attempts to modify the instance partition are rejected.
+ //   - Reading the instance partition via the API continues to give the
+ //     pre-request resource levels.
+ //
+ // Upon completion of the returned operation:
+ //
+ //   - Billing begins for all successfully-allocated resources (some types
+ //     may have lower than the requested levels).
+ //   - All newly-reserved resources are available for serving the instance
+ //     partition's tables.
+ //   - The instance partition's new resource levels are readable via the API.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] will
+ // have a name of the format
+ // `<instance_partition_name>/operations/<operation_id>` and can be used to
+ // track the instance partition modification. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
+ // successful.
+ //
+ // Authorization requires `spanner.instancePartitions.update` permission on
+ // the resource
+ // [name][google.spanner.admin.instance.v1.InstancePartition.name].
+ UpdateInstancePartition(context.Context, *UpdateInstancePartitionRequest) (*longrunningpb.Operation, error)
+ // Lists instance partition [long-running
+ // operations][google.longrunning.Operation] in the given instance.
+ // An instance partition operation has a name of the form
+ // `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
+ // The long-running operation
+ // [metadata][google.longrunning.Operation.metadata] field type
+ // `metadata.type_url` describes the type of the metadata. Operations returned
+ // include those that have completed/failed/canceled within the last 7 days,
+ // and pending operations. Operations returned are ordered by
+ // `operation.metadata.value.start_time` in descending order starting from the
+ // most recently started operation.
+ //
+ // Authorization requires `spanner.instancePartitionOperations.list`
+ // permission on the resource
+ // [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
+ ListInstancePartitionOperations(context.Context, *ListInstancePartitionOperationsRequest) (*ListInstancePartitionOperationsResponse, error)
+ // Moves an instance to the target instance configuration. You can use the
+ // returned [long-running operation][google.longrunning.Operation] to track
+ // the progress of moving the instance.
+ //
+ // `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
+ // the following criteria:
+ //
+ //   - Is undergoing a move to a different instance configuration
+ //   - Has backups
+ //   - Has an ongoing update
+ //   - Contains any CMEK-enabled databases
+ //   - Is a free trial instance
+ //
+ // While the operation is pending:
+ //
+ //   - All other attempts to modify the instance, including changes to its
+ //     compute capacity, are rejected.
+ //
+ //   - The following database and backup admin operations are rejected:
+ //
+ //   - `DatabaseAdmin.CreateDatabase`
+ //
+ //   - `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
+ //     specified in the request.)
+ //
+ //   - `DatabaseAdmin.RestoreDatabase`
+ //
+ //   - `DatabaseAdmin.CreateBackup`
+ //
+ //   - `DatabaseAdmin.CopyBackup`
+ //
+ //   - Both the source and target instance configurations are subject to
+ //     hourly compute and storage charges.
+ //
+ //   - The instance might experience higher read-write latencies and a higher
+ //     transaction abort rate. However, moving an instance doesn't cause any
+ //     downtime.
+ //
+ // The returned [long-running operation][google.longrunning.Operation] has
+ // a name of the format
+ // `<instance_name>/operations/<operation_id>` and can be used to track
+ // the move instance operation. The
+ // [metadata][google.longrunning.Operation.metadata] field type is
+ // [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
+ // The [response][google.longrunning.Operation.response] field type is
+ // [Instance][google.spanner.admin.instance.v1.Instance],
+ // if successful.
+ // Cancelling the operation sets its metadata's
+ // [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
+ // Cancellation is not immediate because it involves moving any data
+ // previously moved to the target instance configuration back to the original
+ // instance configuration. You can use this operation to track the progress of
+ // the cancellation. Upon successful completion of the cancellation, the
+ // operation terminates with `CANCELLED` status.
+ //
+ // If not cancelled, upon completion of the returned operation:
+ //
+ //   - The instance successfully moves to the target instance
+ //     configuration.
+ //   - You are billed for compute and storage in target instance
+ //     configuration.
+ //
+ // Authorization requires the `spanner.instances.update` permission on
+ // the resource [instance][google.spanner.admin.instance.v1.Instance].
+ //
+ // For more details, see
+ // [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
+ MoveInstance(context.Context, *MoveInstanceRequest) (*longrunningpb.Operation, error)
+}
+
+// UnimplementedInstanceAdminServer can be embedded to have forward compatible implementations.
+type UnimplementedInstanceAdminServer struct {
+}
+
+// Each stub below returns a codes.Unimplemented status, so a server that
+// embeds UnimplementedInstanceAdminServer keeps compiling (and fails
+// gracefully at runtime) when new RPCs are added to the service.
+func (*UnimplementedInstanceAdminServer) ListInstanceConfigs(context.Context, *ListInstanceConfigsRequest) (*ListInstanceConfigsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListInstanceConfigs not implemented")
+}
+func (*UnimplementedInstanceAdminServer) GetInstanceConfig(context.Context, *GetInstanceConfigRequest) (*InstanceConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetInstanceConfig not implemented")
+}
+func (*UnimplementedInstanceAdminServer) CreateInstanceConfig(context.Context, *CreateInstanceConfigRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateInstanceConfig not implemented")
+}
+func (*UnimplementedInstanceAdminServer) UpdateInstanceConfig(context.Context, *UpdateInstanceConfigRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateInstanceConfig not implemented")
+}
+func (*UnimplementedInstanceAdminServer) DeleteInstanceConfig(context.Context, *DeleteInstanceConfigRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteInstanceConfig not implemented")
+}
+func (*UnimplementedInstanceAdminServer) ListInstanceConfigOperations(context.Context, *ListInstanceConfigOperationsRequest) (*ListInstanceConfigOperationsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListInstanceConfigOperations not implemented")
+}
+func (*UnimplementedInstanceAdminServer) ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListInstances not implemented")
+}
+func (*UnimplementedInstanceAdminServer) ListInstancePartitions(context.Context, *ListInstancePartitionsRequest) (*ListInstancePartitionsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListInstancePartitions not implemented")
+}
+func (*UnimplementedInstanceAdminServer) GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetInstance not implemented")
+}
+func (*UnimplementedInstanceAdminServer) CreateInstance(context.Context, *CreateInstanceRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateInstance not implemented")
+}
+func (*UnimplementedInstanceAdminServer) UpdateInstance(context.Context, *UpdateInstanceRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateInstance not implemented")
+}
+func (*UnimplementedInstanceAdminServer) DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteInstance not implemented")
+}
+func (*UnimplementedInstanceAdminServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
+}
+func (*UnimplementedInstanceAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
+}
+func (*UnimplementedInstanceAdminServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
+}
+func (*UnimplementedInstanceAdminServer) GetInstancePartition(context.Context, *GetInstancePartitionRequest) (*InstancePartition, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetInstancePartition not implemented")
+}
+func (*UnimplementedInstanceAdminServer) CreateInstancePartition(context.Context, *CreateInstancePartitionRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateInstancePartition not implemented")
+}
+func (*UnimplementedInstanceAdminServer) DeleteInstancePartition(context.Context, *DeleteInstancePartitionRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteInstancePartition not implemented")
+}
+func (*UnimplementedInstanceAdminServer) UpdateInstancePartition(context.Context, *UpdateInstancePartitionRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateInstancePartition not implemented")
+}
+func (*UnimplementedInstanceAdminServer) ListInstancePartitionOperations(context.Context, *ListInstancePartitionOperationsRequest) (*ListInstancePartitionOperationsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListInstancePartitionOperations not implemented")
+}
+func (*UnimplementedInstanceAdminServer) MoveInstance(context.Context, *MoveInstanceRequest) (*longrunningpb.Operation, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MoveInstance not implemented")
+}
+
+// RegisterInstanceAdminServer registers srv with the gRPC server s using the
+// package-level InstanceAdmin service descriptor.
+func RegisterInstanceAdminServer(s *grpc.Server, srv InstanceAdminServer) {
+ s.RegisterService(&_InstanceAdmin_serviceDesc, srv)
+}
+
+// _InstanceAdmin_ListInstanceConfigs_Handler is the generated unary handler
+// for ListInstanceConfigs: it decodes the request, then calls the server
+// directly or routes the call through the configured interceptor.
+func _InstanceAdmin_ListInstanceConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListInstanceConfigsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, req.(*ListInstanceConfigsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_GetInstanceConfig_Handler is the generated unary handler for
+// GetInstanceConfig: it decodes the request, then calls the server directly or
+// routes the call through the configured interceptor.
+func _InstanceAdmin_GetInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetInstanceConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).GetInstanceConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).GetInstanceConfig(ctx, req.(*GetInstanceConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_CreateInstanceConfig_Handler is the generated unary handler
+// for CreateInstanceConfig: it decodes the request, then calls the server
+// directly or routes the call through the configured interceptor.
+func _InstanceAdmin_CreateInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateInstanceConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).CreateInstanceConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).CreateInstanceConfig(ctx, req.(*CreateInstanceConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_UpdateInstanceConfig_Handler is the generated unary handler
+// for UpdateInstanceConfig: it decodes the request, then calls the server
+// directly or routes the call through the configured interceptor.
+func _InstanceAdmin_UpdateInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateInstanceConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).UpdateInstanceConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).UpdateInstanceConfig(ctx, req.(*UpdateInstanceConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_DeleteInstanceConfig_Handler is the generated unary handler
+// for DeleteInstanceConfig: it decodes the request, then calls the server
+// directly or routes the call through the configured interceptor.
+func _InstanceAdmin_DeleteInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteInstanceConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).DeleteInstanceConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).DeleteInstanceConfig(ctx, req.(*DeleteInstanceConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_ListInstanceConfigOperations_Handler is the generated unary
+// handler for ListInstanceConfigOperations: it decodes the request, then calls
+// the server directly or routes the call through the configured interceptor.
+func _InstanceAdmin_ListInstanceConfigOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListInstanceConfigOperationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).ListInstanceConfigOperations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).ListInstanceConfigOperations(ctx, req.(*ListInstanceConfigOperationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_ListInstances_Handler is the generated unary handler for
+// ListInstances: it decodes the request, then calls the server directly or
+// routes the call through the configured interceptor.
+func _InstanceAdmin_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListInstancesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).ListInstances(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).ListInstances(ctx, req.(*ListInstancesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// _InstanceAdmin_ListInstancePartitions_Handler is the generated unary handler
+// for ListInstancePartitions: it decodes the request, then calls the server
+// directly or routes the call through the configured interceptor.
+func _InstanceAdmin_ListInstancePartitions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListInstancePartitionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).ListInstancePartitions(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).ListInstancePartitions(ctx, req.(*ListInstancePartitionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetInstanceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).GetInstance(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).GetInstance(ctx, req.(*GetInstanceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateInstanceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).CreateInstance(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).CreateInstance(ctx, req.(*CreateInstanceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateInstanceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).UpdateInstance(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).UpdateInstance(ctx, req.(*UpdateInstanceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteInstanceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).DeleteInstance(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(iampb.SetIamPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).SetIamPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(iampb.GetIamPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).GetIamPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(iampb.TestIamPermissionsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).TestIamPermissions(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_GetInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetInstancePartitionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).GetInstancePartition(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).GetInstancePartition(ctx, req.(*GetInstancePartitionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_CreateInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateInstancePartitionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).CreateInstancePartition(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).CreateInstancePartition(ctx, req.(*CreateInstancePartitionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_DeleteInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteInstancePartitionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).DeleteInstancePartition(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).DeleteInstancePartition(ctx, req.(*DeleteInstancePartitionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_UpdateInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateInstancePartitionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).UpdateInstancePartition(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).UpdateInstancePartition(ctx, req.(*UpdateInstancePartitionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_ListInstancePartitionOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListInstancePartitionOperationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).ListInstancePartitionOperations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).ListInstancePartitionOperations(ctx, req.(*ListInstancePartitionOperationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _InstanceAdmin_MoveInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MoveInstanceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(InstanceAdminServer).MoveInstance(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(InstanceAdminServer).MoveInstance(ctx, req.(*MoveInstanceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
// _InstanceAdmin_serviceDesc describes the InstanceAdmin service for
// registration with a grpc.Server: it maps each proto method name to the
// generated handler above. The service defines no streaming methods.
var _InstanceAdmin_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.spanner.admin.instance.v1.InstanceAdmin",
	HandlerType: (*InstanceAdminServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "ListInstanceConfigs",
			Handler:    _InstanceAdmin_ListInstanceConfigs_Handler,
		},
		{
			MethodName: "GetInstanceConfig",
			Handler:    _InstanceAdmin_GetInstanceConfig_Handler,
		},
		{
			MethodName: "CreateInstanceConfig",
			Handler:    _InstanceAdmin_CreateInstanceConfig_Handler,
		},
		{
			MethodName: "UpdateInstanceConfig",
			Handler:    _InstanceAdmin_UpdateInstanceConfig_Handler,
		},
		{
			MethodName: "DeleteInstanceConfig",
			Handler:    _InstanceAdmin_DeleteInstanceConfig_Handler,
		},
		{
			MethodName: "ListInstanceConfigOperations",
			Handler:    _InstanceAdmin_ListInstanceConfigOperations_Handler,
		},
		{
			MethodName: "ListInstances",
			Handler:    _InstanceAdmin_ListInstances_Handler,
		},
		{
			MethodName: "ListInstancePartitions",
			Handler:    _InstanceAdmin_ListInstancePartitions_Handler,
		},
		{
			MethodName: "GetInstance",
			Handler:    _InstanceAdmin_GetInstance_Handler,
		},
		{
			MethodName: "CreateInstance",
			Handler:    _InstanceAdmin_CreateInstance_Handler,
		},
		{
			MethodName: "UpdateInstance",
			Handler:    _InstanceAdmin_UpdateInstance_Handler,
		},
		{
			MethodName: "DeleteInstance",
			Handler:    _InstanceAdmin_DeleteInstance_Handler,
		},
		{
			MethodName: "SetIamPolicy",
			Handler:    _InstanceAdmin_SetIamPolicy_Handler,
		},
		{
			MethodName: "GetIamPolicy",
			Handler:    _InstanceAdmin_GetIamPolicy_Handler,
		},
		{
			MethodName: "TestIamPermissions",
			Handler:    _InstanceAdmin_TestIamPermissions_Handler,
		},
		{
			MethodName: "GetInstancePartition",
			Handler:    _InstanceAdmin_GetInstancePartition_Handler,
		},
		{
			MethodName: "CreateInstancePartition",
			Handler:    _InstanceAdmin_CreateInstancePartition_Handler,
		},
		{
			MethodName: "DeleteInstancePartition",
			Handler:    _InstanceAdmin_DeleteInstancePartition_Handler,
		},
		{
			MethodName: "UpdateInstancePartition",
			Handler:    _InstanceAdmin_UpdateInstancePartition_Handler,
		},
		{
			MethodName: "ListInstancePartitionOperations",
			Handler:    _InstanceAdmin_ListInstancePartitionOperations_Handler,
		},
		{
			MethodName: "MoveInstance",
			Handler:    _InstanceAdmin_MoveInstance_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "google/spanner/admin/instance/v1/spanner_instance_admin.proto",
}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go
new file mode 100644
index 000000000..92c5c053c
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go
@@ -0,0 +1,61 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package instance
+
// InstanceAdminProjectPath returns the path for the project resource.
//
// Deprecated: Use
//
//	fmt.Sprintf("projects/%s", project)
//
// instead.
func InstanceAdminProjectPath(project string) string {
	return "projects/" + project
}
+
// InstanceAdminInstanceConfigPath returns the path for the instance config resource.
//
// Deprecated: Use
//
//	fmt.Sprintf("projects/%s/instanceConfigs/%s", project, instanceConfig)
//
// instead.
func InstanceAdminInstanceConfigPath(project, instanceConfig string) string {
	return "projects/" + project + "/instanceConfigs/" + instanceConfig
}
+
// InstanceAdminInstancePath returns the path for the instance resource.
//
// Deprecated: Use
//
//	fmt.Sprintf("projects/%s/instances/%s", project, instance)
//
// instead.
func InstanceAdminInstancePath(project, instance string) string {
	return "projects/" + project + "/instances/" + instance
}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go
new file mode 100644
index 000000000..0eaf4377d
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go
@@ -0,0 +1,23 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapicgen. DO NOT EDIT.
+
+package instance
+
+import "cloud.google.com/go/spanner/internal"
+
func init() {
	// Record the spanner module's version (from the internal package) in
	// versionClient, which is declared elsewhere in this package and used
	// by the generated clients when reporting their version.
	versionClient = internal.Version
}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/fuzz.go b/vendor/cloud.google.com/go/spanner/spansql/fuzz.go
new file mode 100644
index 000000000..3621209f1
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spansql/fuzz.go
@@ -0,0 +1,29 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build gofuzz
+// +build gofuzz
+
+package spansql
+
// FuzzParseQuery is a go-fuzz entry point (built only under the gofuzz
// tag): it attempts to parse data as a query and returns the standard
// go-fuzz priority signal.
func FuzzParseQuery(data []byte) int {
	if _, err := ParseQuery(string(data)); err != nil {
		// The value 0 signals data is an invalid query that should be
		// added to the corpus.
		return 0
	}
	// The value 1 signals the input was lexically correct and the
	// fuzzer should increase the priority of the given input.
	return 1
}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/keywords.go b/vendor/cloud.google.com/go/spanner/spansql/keywords.go
new file mode 100644
index 000000000..6515a41e5
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spansql/keywords.go
@@ -0,0 +1,322 @@
+/*
+Copyright 2020 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spansql
+
+import (
+ "strings"
+)
+
+// IsKeyword reports whether the identifier is a reserved keyword.
+func IsKeyword(id string) bool {
+ return keywords[strings.ToUpper(id)]
+}
+
// keywords is the set of reserved keywords, keyed by their upper-case
// form (every present key maps to true; absent keys read as false).
// https://cloud.google.com/spanner/docs/lexical#reserved-keywords
var keywords = map[string]bool{
	"ALL":                  true,
	"AND":                  true,
	"ANY":                  true,
	"ARRAY":                true,
	"AS":                   true,
	"ASC":                  true,
	"ASSERT_ROWS_MODIFIED": true,
	"AT":                   true,
	"BETWEEN":              true,
	"BY":                   true,
	"CASE":                 true,
	"CAST":                 true,
	"COLLATE":              true,
	"CONTAINS":             true,
	"CREATE":               true,
	"CROSS":                true,
	"CUBE":                 true,
	"CURRENT":              true,
	"DEFAULT":              true,
	"DEFINE":               true,
	"DESC":                 true,
	"DISTINCT":             true,
	"ELSE":                 true,
	"END":                  true,
	"ENUM":                 true,
	"ESCAPE":               true,
	"EXCEPT":               true,
	"EXCLUDE":              true,
	"EXISTS":               true,
	"EXTRACT":              true,
	"FALSE":                true,
	"FETCH":                true,
	"FOLLOWING":            true,
	"FOR":                  true,
	"FROM":                 true,
	"FULL":                 true,
	"GROUP":                true,
	"GROUPING":             true,
	"GROUPS":               true,
	"HASH":                 true,
	"HAVING":               true,
	"IF":                   true,
	"IGNORE":               true,
	"IN":                   true,
	"INNER":                true,
	"INTERSECT":            true,
	"INTERVAL":             true,
	"INTO":                 true,
	"IS":                   true,
	"JOIN":                 true,
	"LATERAL":              true,
	"LEFT":                 true,
	"LIKE":                 true,
	"LIMIT":                true,
	"LOOKUP":               true,
	"MERGE":                true,
	"NATURAL":              true,
	"NEW":                  true,
	"NO":                   true,
	"NOT":                  true,
	"NULL":                 true,
	"NULLS":                true,
	"OF":                   true,
	"ON":                   true,
	"OR":                   true,
	"ORDER":                true,
	"OUTER":                true,
	"OVER":                 true,
	"PARTITION":            true,
	"PRECEDING":            true,
	"PROTO":                true,
	"RANGE":                true,
	"RECURSIVE":            true,
	"RESPECT":              true,
	"RIGHT":                true,
	"ROLLUP":               true,
	"ROWS":                 true,
	"SELECT":               true,
	"SET":                  true,
	"SOME":                 true,
	"STRUCT":               true,
	"TABLESAMPLE":          true,
	"THEN":                 true,
	"TO":                   true,
	"TREAT":                true,
	"TRUE":                 true,
	"UNBOUNDED":            true,
	"UNION":                true,
	"UNNEST":               true,
	"USING":                true,
	"WHEN":                 true,
	"WHERE":                true,
	"WINDOW":               true,
	"WITH":                 true,
	"WITHIN":               true,
}
+
// funcs is the set of reserved keywords that are functions.
// https://cloud.google.com/spanner/docs/functions-and-operators
var funcs = make(map[string]bool)

// funcArgParsers maps function names to custom argument parsers for the
// functions whose arguments do not follow the regular expression grammar.
var funcArgParsers = make(map[string]func(*parser) (Expr, *parseError))

// aggregateFuncs is the subset of funcs that are aggregate functions.
var aggregateFuncs = make(map[string]bool)

func init() {
	// Flatten the name lists into membership sets.
	for _, f := range funcNames {
		funcs[f] = true
	}
	for _, f := range aggregateFuncNames {
		funcs[f] = true
		aggregateFuncs[f] = true
	}
	// Special case for CAST, SAFE_CAST and EXTRACT
	funcArgParsers["CAST"] = typedArgParser
	funcArgParsers["SAFE_CAST"] = typedArgParser
	funcArgParsers["EXTRACT"] = extractArgParser
	// Special case of INTERVAL arg for DATE_ADD, DATE_SUB, GENERATE_DATE_ARRAY
	funcArgParsers["DATE_ADD"] = dateIntervalArgParser
	funcArgParsers["DATE_SUB"] = dateIntervalArgParser
	funcArgParsers["GENERATE_DATE_ARRAY"] = dateIntervalArgParser
	// Special case of INTERVAL arg for TIMESTAMP_ADD, TIMESTAMP_SUB
	funcArgParsers["TIMESTAMP_ADD"] = timestampIntervalArgParser
	funcArgParsers["TIMESTAMP_SUB"] = timestampIntervalArgParser
	// Special case of SEQUENCE arg for GET_NEXT_SEQUENCE_VALUE, GET_INTERNAL_SEQUENCE_STATE
	funcArgParsers["GET_NEXT_SEQUENCE_VALUE"] = sequenceArgParser
	funcArgParsers["GET_INTERNAL_SEQUENCE_STATE"] = sequenceArgParser
}
+
// funcNames lists the non-aggregate function names; init flattens it
// into the funcs membership set. Fix: "ARRAY_REVERSE" was listed twice
// (once among the ARRAY_MAX..ARRAY_TRANSFORM group and once on its own);
// the redundant entry has been removed.
var funcNames = []string{
	// TODO: many more

	// Cast functions.
	"CAST",
	"SAFE_CAST",

	// Mathematical functions.
	"ABS",
	"ACOS",
	"ACOSH",
	"ASIN",
	"ASINH",
	"ATAN",
	"ATAN2",
	"ATANH",
	"CEIL",
	"CEILING",
	"COS",
	"COSH",
	"DIV",
	"EXP",
	"FLOOR",
	"GREATEST",
	"IEEE_DIVIDE",
	"IS_INF",
	"IS_NAN",
	"LEAST",
	"LN",
	"LOG",
	"LOG10",
	"MOD",
	"POW",
	"POWER",
	"ROUND",
	"SAFE_ADD",
	"SAFE_DIVIDE",
	"SAFE_MULTIPLY",
	"SAFE_NEGATE",
	"SAFE_SUBTRACT",
	"SIGN",
	"SIN",
	"SINH",
	"SQRT",
	"TAN",
	"TANH",
	"TRUNC",

	// Hash functions.
	"FARM_FINGERPRINT",
	"SHA1",
	"SHA256", "SHA512",

	// String functions.
	"BYTE_LENGTH", "CHAR_LENGTH", "CHARACTER_LENGTH",
	"CODE_POINTS_TO_BYTES", "CODE_POINTS_TO_STRING",
	"CONCAT",
	"ENDS_WITH",
	"FORMAT",
	"FROM_BASE32", "FROM_BASE64", "FROM_HEX",
	"LENGTH",
	"LOWER",
	"LPAD",
	"LTRIM",
	"REGEXP_CONTAINS", "REGEXP_EXTRACT", "REGEXP_EXTRACT_ALL", "REGEXP_REPLACE",
	"REPEAT",
	"REPLACE",
	"REVERSE",
	"RPAD",
	"RTRIM",
	"SAFE_CONVERT_BYTES_TO_STRING",
	"SPLIT",
	"STARTS_WITH",
	"STRPOS",
	"SUBSTR",
	"TO_BASE32", "TO_BASE64", "TO_CODE_POINTS", "TO_HEX",
	"TRIM",
	"UPPER",

	// Array functions.
	"ARRAY",
	"ARRAY_CONCAT",
	"ARRAY_FIRST", "ARRAY_INCLUDES", "ARRAY_INCLUDES_ALL", "ARRAY_INCLUDES_ANY", "ARRAY_LAST",
	"ARRAY_LENGTH",
	"ARRAY_MAX", "ARRAY_MIN", "ARRAY_REVERSE", "ARRAY_SLICE", "ARRAY_TRANSFORM",
	"ARRAY_TO_STRING",
	"GENERATE_ARRAY", "GENERATE_DATE_ARRAY",
	"OFFSET", "ORDINAL",
	"ARRAY_IS_DISTINCT",
	"SAFE_OFFSET", "SAFE_ORDINAL",

	// Date functions.
	"CURRENT_DATE",
	"EXTRACT",
	"DATE",
	"DATE_ADD",
	"DATE_SUB",
	"DATE_DIFF",
	"DATE_TRUNC",
	"DATE_FROM_UNIX_DATE",
	"FORMAT_DATE",
	"PARSE_DATE",
	"UNIX_DATE",

	// Timestamp functions.
	"CURRENT_TIMESTAMP",
	"STRING",
	"TIMESTAMP",
	"TIMESTAMP_ADD",
	"TIMESTAMP_SUB",
	"TIMESTAMP_DIFF",
	"TIMESTAMP_TRUNC",
	"FORMAT_TIMESTAMP",
	"PARSE_TIMESTAMP",
	"TIMESTAMP_SECONDS",
	"TIMESTAMP_MILLIS",
	"TIMESTAMP_MICROS",
	"UNIX_SECONDS",
	"UNIX_MILLIS",
	"UNIX_MICROS",
	"PENDING_COMMIT_TIMESTAMP",

	// JSON functions.
	"JSON_QUERY",
	"JSON_VALUE",
	"JSON_QUERY_ARRAY",
	"JSON_VALUE_ARRAY",

	// Bit functions.
	"BIT_COUNT",
	"BIT_REVERSE",

	// Sequence functions.
	"GET_NEXT_SEQUENCE_VALUE",
	"GET_INTERNAL_SEQUENCE_STATE",

	// Utility functions.
	"GENERATE_UUID",
}
+
// aggregateFuncNames lists the aggregate function names; init flattens
// it into both the funcs and aggregateFuncs membership sets.
var aggregateFuncNames = []string{
	// Aggregate functions.
	"ANY_VALUE",
	"ARRAY_AGG",
	"ARRAY_CONCAT_AGG",
	"AVG",
	"BIT_AND",
	"BIT_OR",
	"BIT_XOR",
	"COUNT",
	"COUNTIF",
	"LOGICAL_AND",
	"LOGICAL_OR",
	"MAX",
	"MIN",
	"STRING_AGG",
	"SUM",

	// Statistical aggregate functions.
	"STDDEV",
	"STDDEV_SAMP",
	"VAR_SAMP",
	"VARIANCE",
}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/parser.go b/vendor/cloud.google.com/go/spanner/spansql/parser.go
new file mode 100644
index 000000000..982b6d1d5
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spansql/parser.go
@@ -0,0 +1,4696 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package spansql contains types and a parser for the Cloud Spanner SQL dialect.
+
+To parse, use one of the Parse functions (ParseDDL, ParseDDLStmt, ParseQuery, etc.).
+
+Sources:
+
+ https://cloud.google.com/spanner/docs/lexical
+ https://cloud.google.com/spanner/docs/query-syntax
+ https://cloud.google.com/spanner/docs/data-definition-language
+*/
+package spansql
+
+/*
+This file is structured as follows:
+
+- There are several exported ParseFoo functions that accept an input string
+ and return a type defined in types.go. This is the principal API of this package.
+ These functions are implemented as wrappers around the lower-level functions,
+ with additional checks to ensure things such as input exhaustion.
+- The token and parser types are defined. These constitute the lexical token
+ and parser machinery. parser.next is the main way that other functions get
+ the next token, with parser.back providing a single token rewind, and
+ parser.sniff, parser.eat and parser.expect providing lookahead helpers.
+- The parseFoo methods are defined, matching the SQL grammar. Each consumes its
+ namesake production from the parser. There are also some fooParser helper vars
+ defined that abbreviate the parsing of some of the regular productions.
+*/
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "cloud.google.com/go/civil"
+)
+
// debug gates the package's stderr trace output; it is a compile-time
// switch that is normally off.
const debug = false

// debugf writes a formatted trace line to stderr when debug is enabled;
// otherwise it is a no-op.
func debugf(format string, args ...interface{}) {
	if debug {
		fmt.Fprintf(os.Stderr, "spansql debug: "+format+"\n", args...)
	}
}
+
+// ParseDDL parses a DDL file.
+//
+// The provided filename is used for error reporting and will
+// appear in the returned structure.
+func ParseDDL(filename, s string) (*DDL, error) {
+ ddl := &DDL{}
+ if err := parseStatements(ddl, filename, s); err != nil {
+ return nil, err
+ }
+
+ return ddl, nil
+}
+
+// ParseDML parses a DML file.
+//
+// The provided filename is used for error reporting and will
+// appear in the returned structure.
+func ParseDML(filename, s string) (*DML, error) {
+ dml := &DML{}
+ if err := parseStatements(dml, filename, s); err != nil {
+ return nil, err
+ }
+
+ return dml, nil
+}
+
// parseStatements parses s, a file of semicolon-separated statements, and
// appends the results to stmts. The concrete type of stmts selects the
// grammar: a *DDL collects DDL statements, a *DML collects DML statements.
// filename is recorded on stmts and used in error positions. Comments
// gathered during the parse are normalized (common leading whitespace
// stripped) and attached to stmts.
func parseStatements(stmts statements, filename string, s string) error {
	p := newParser(filename, s)

	stmts.setFilename(filename)

	for {
		p.skipSpace()
		if p.done {
			break
		}

		// Dispatch on the statement-list type to choose the grammar.
		switch v := stmts.(type) {
		case *DDL:
			stmt, err := p.parseDDLStmt()
			if err != nil {
				return err
			}
			v.List = append(v.List, stmt)
		case *DML:
			stmt, err := p.parseDMLStmt()
			if err != nil {
				return err
			}
			v.List = append(v.List, stmt)
		}

		// Each statement must be followed by ";" or end-of-input.
		tok := p.next()
		if tok.err == eof {
			break
		} else if tok.err != nil {
			return tok.err
		}
		if tok.value == ";" {
			continue
		} else {
			return p.errorf("unexpected token %q", tok.value)
		}
	}
	if p.Rem() != "" {
		return fmt.Errorf("unexpected trailing contents %q", p.Rem())
	}

	// Handle comments.
	for _, com := range p.comments {
		c := &Comment{
			Marker:   com.marker,
			Isolated: com.isolated,
			Start:    com.start,
			End:      com.end,
			Text:     com.text,
		}

		// Strip common whitespace prefix and any whitespace suffix.
		// TODO: This is a bodgy implementation of Longest Common Prefix,
		// and also doesn't do tabs vs. spaces well.
		var prefix string
		for i, line := range c.Text {
			line = strings.TrimRight(line, " \b\t")
			c.Text[i] = line
			trim := len(line) - len(strings.TrimLeft(line, " \b\t"))
			if i == 0 {
				// First line seeds the candidate prefix.
				prefix = line[:trim]
			} else {
				// Check how much of prefix is in common.
				for !strings.HasPrefix(line, prefix) {
					prefix = prefix[:len(prefix)-1]
				}
			}
			if prefix == "" {
				break
			}
		}
		if prefix != "" {
			for i, line := range c.Text {
				c.Text[i] = strings.TrimPrefix(line, prefix)
			}
		}

		stmts.addComment(c)
	}

	return nil
}
+
+// ParseDDLStmt parses a single DDL statement.
+func ParseDDLStmt(s string) (DDLStmt, error) {
+ p := newParser("-", s)
+ stmt, err := p.parseDDLStmt()
+ if err != nil {
+ return nil, err
+ }
+ if p.Rem() != "" {
+ return nil, fmt.Errorf("unexpected trailing contents %q", p.Rem())
+ }
+ return stmt, nil
+}
+
+// ParseDMLStmt parses a single DML statement.
+func ParseDMLStmt(s string) (DMLStmt, error) {
+ p := newParser("-", s)
+ stmt, err := p.parseDMLStmt()
+ if err != nil {
+ return nil, err
+ }
+ if p.Rem() != "" {
+ return nil, fmt.Errorf("unexpected trailing contents %q", p.Rem())
+ }
+ return stmt, nil
+}
+
+// ParseQuery parses a query string.
+func ParseQuery(s string) (Query, error) {
+ p := newParser("-", s)
+ q, err := p.parseQuery()
+ if err != nil {
+ return Query{}, err
+ }
+ if p.Rem() != "" {
+ return Query{}, fmt.Errorf("unexpected trailing query contents %q", p.Rem())
+ }
+ return q, nil
+}
+
// token is a single lexical token, carrying its raw text, its position,
// and any literal value decoded from it.
type token struct {
	value        string      // raw text of the token
	err          *parseError // lexing error; eof at end of input
	line, offset int         // position where the token begins

	typ     tokenType
	float64 float64
	string  string // unquoted form for stringToken/bytesToken/quotedID

	// int64Token is parsed as a number only when it is known to be a literal.
	// This permits correct handling of operators preceding such a token,
	// which cannot be identified as part of the int64 until later.
	int64Base int
}
+
// tokenType classifies a token's literal kind; unknownToken covers
// operators, punctuation and anything else not listed.
type tokenType int

const (
	unknownToken tokenType = iota
	int64Token   // integer literal; its base is recorded in token.int64Base
	float64Token // floating-point literal; value in token.float64
	stringToken  // string literal; unquoted form in token.string
	bytesToken   // bytes literal; unquoted form in token.string
	unquotedID   // bare identifier
	quotedID     // quoted identifier; unquoted form in token.string
)
+
+func (t *token) String() string {
+ if t.err != nil {
+ return fmt.Sprintf("parse error: %v", t.err)
+ }
+ return strconv.Quote(t.value)
+}
+
// parseError is a lex/parse error annotated with the position in the
// input where it occurred; it implements the error interface.
type parseError struct {
	message  string
	filename string
	line     int // 1-based line number
	offset   int // 0-based byte offset from start of input
}
+
+func (pe *parseError) Error() string {
+ if pe == nil {
+ return "<nil>"
+ }
+ if pe.line == 1 {
+ return fmt.Sprintf("%s:1.%d: %v", pe.filename, pe.offset, pe.message)
+ }
+ return fmt.Sprintf("%s:%d: %v", pe.filename, pe.line, pe.message)
+}
+
// eof is the sentinel *parseError produced at end of input; callers
// compare against it directly (tok.err == eof).
var eof = &parseError{message: "EOF"}
+
// parser holds the combined lexer and parser state while consuming an
// input string. The current token lives in cur; a one-token rewind is
// modeled by the backed flag.
type parser struct {
	s      string // Remaining input.
	done   bool   // Whether the parsing is finished (success or error).
	backed bool   // Whether back() was called.
	cur    token

	filename     string
	line, offset int // updated by places that shrink s

	comments []comment // accumulated during parse
}
+
// comment is a source comment captured during lexing; parseStatements
// later converts it to a *Comment and attaches it to the statement list.
type comment struct {
	marker     string // "#" or "--" or "/*"
	isolated   bool   // if it starts on its own line
	start, end Position
	text       []string
}
+
// Pos reports the position (line and byte offset) of the current token.
func (p *parser) Pos() Position { return Position{Line: p.cur.line, Offset: p.cur.offset} }
+
+func newParser(filename, s string) *parser {
+ return &parser{
+ s: s,
+
+ cur: token{line: 1},
+
+ filename: filename,
+ line: 1,
+ }
+}
+
+// Rem returns the unparsed remainder, ignoring space.
+func (p *parser) Rem() string {
+ rem := p.s
+ if p.backed {
+ rem = p.cur.value + rem
+ }
+ i := 0
+ for ; i < len(rem); i++ {
+ if !isSpace(rem[i]) {
+ break
+ }
+ }
+ return rem[i:]
+}
+
+func (p *parser) String() string {
+ if p.backed {
+ return fmt.Sprintf("next tok: %s (rem: %q)", &p.cur, p.s)
+ }
+ return fmt.Sprintf("rem: %q", p.s)
+}
+
+func (p *parser) errorf(format string, args ...interface{}) *parseError {
+ pe := &parseError{
+ message: fmt.Sprintf(format, args...),
+ filename: p.filename,
+ line: p.cur.line,
+ offset: p.cur.offset,
+ }
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
// isInitialIdentifierChar reports whether c may begin an identifier:
// an ASCII letter or underscore.
// https://cloud.google.com/spanner/docs/lexical#identifiers
func isInitialIdentifierChar(c byte) bool {
	return c == '_' ||
		('A' <= c && c <= 'Z') ||
		('a' <= c && c <= 'z')
}
+
// isIdentifierChar reports whether c may appear in an identifier: an
// ASCII letter, digit or underscore.
// https://cloud.google.com/spanner/docs/lexical#identifiers
// This doesn't apply the restriction that an identifier cannot start with [0-9],
// nor does it check against reserved keywords.
func isIdentifierChar(c byte) bool {
	return c == '_' ||
		('0' <= c && c <= '9') ||
		('A' <= c && c <= 'Z') ||
		('a' <= c && c <= 'z')
}
+
// isHexDigit reports whether c is an ASCII hexadecimal digit
// in either letter case.
func isHexDigit(c byte) bool {
	switch {
	case '0' <= c && c <= '9',
		'a' <= c && c <= 'f',
		'A' <= c && c <= 'F':
		return true
	}
	return false
}
+
// isOctalDigit reports whether c is an ASCII octal digit ('0'..'7').
func isOctalDigit(c byte) bool {
	return c >= '0' && c <= '7'
}
+
// consumeNumber lexes an integer or floating-point literal from the start of
// p.s into p.cur. A '.' or an exponent switches to float64 parsing; the hex
// form is integer-only. Integer values themselves are parsed lazily (only
// the base is recorded here).
func (p *parser) consumeNumber() {
	/*
		int64_value:
			{ decimal_value | hex_value }

		decimal_value:
			[-]0—9+

		hex_value:
			[-]0[xX]{0—9|a—f|A—F}+

		(float64_value is not formally specified)

		float64_value :=
			[+-]DIGITS.[DIGITS][e[+-]DIGITS]
			| [DIGITS].DIGITS[e[+-]DIGITS]
			| DIGITSe[+-]DIGITS
	*/

	i, neg, base := 0, false, 10
	float, e, dot := false, false, false
	if p.s[i] == '-' {
		neg = true
		i++
	} else if p.s[i] == '+' {
		// This isn't in the formal grammar, but is mentioned informally.
		// https://cloud.google.com/spanner/docs/lexical#integer-literals
		i++
	}
	if strings.HasPrefix(p.s[i:], "0x") || strings.HasPrefix(p.s[i:], "0X") {
		base = 16
		i += 2
	}
	// d0 marks the start of the digits proper (after any sign/base prefix).
	d0 := i
digitLoop:
	for i < len(p.s) {
		switch c := p.s[i]; {
		case '0' <= c && c <= '9':
			i++
		case base == 16 && 'A' <= c && c <= 'F':
			i++
		case base == 16 && 'a' <= c && c <= 'f':
			i++
		case base == 10 && (c == 'e' || c == 'E'):
			if e {
				p.errorf("bad token %q", p.s[:i])
				return
			}
			// Switch to consuming float.
			float, e = true, true
			i++

			if i < len(p.s) && (p.s[i] == '+' || p.s[i] == '-') {
				i++
			}
		case base == 10 && c == '.':
			if dot || e { // any dot must come before E
				p.errorf("bad token %q", p.s[:i])
				return
			}
			// Switch to consuming float.
			float, dot = true, true
			i++
		default:
			break digitLoop
		}
	}
	if d0 == i {
		p.errorf("no digits in numeric literal")
		return
	}
	sign := ""
	if neg {
		sign = "-"
	}
	p.cur.value, p.s = p.s[:i], p.s[i:]
	p.offset += i
	var err error
	if float {
		p.cur.typ = float64Token
		p.cur.float64, err = strconv.ParseFloat(sign+p.cur.value[d0:], 64)
	} else {
		p.cur.typ = int64Token
		p.cur.value = sign + p.cur.value[d0:]
		p.cur.int64Base = base
		// This is parsed on demand.
	}
	if err != nil {
		p.errorf("bad numeric literal %q: %v", p.cur.value, err)
	}
}
+
// consumeString lexes a quoted string literal (including its delimiters)
// from the start of p.s into p.cur.
func (p *parser) consumeString() {
	// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals

	delim := p.stringDelimiter()
	if p.cur.err != nil {
		return
	}

	p.cur.string, p.cur.err = p.consumeStringContent(delim, false, true, "string literal")
	p.cur.typ = stringToken
}
+
// consumeRawString lexes a raw string literal (R"..." / r'...') into p.cur.
// The caller has already matched the leading 'R'/'r'.
func (p *parser) consumeRawString() {
	// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals

	p.s = p.s[1:] // consume 'R'
	delim := p.stringDelimiter()
	if p.cur.err != nil {
		return
	}

	p.cur.string, p.cur.err = p.consumeStringContent(delim, true, true, "raw string literal")
	p.cur.typ = stringToken
}
+
// consumeBytes lexes a bytes literal (B"..." / b'...') into p.cur.
// The caller has already matched the leading 'B'/'b'.
func (p *parser) consumeBytes() {
	// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals

	p.s = p.s[1:] // consume 'B'
	delim := p.stringDelimiter()
	if p.cur.err != nil {
		return
	}

	p.cur.string, p.cur.err = p.consumeStringContent(delim, false, false, "bytes literal")
	p.cur.typ = bytesToken
}
+
// consumeRawBytes lexes a raw bytes literal (RB"..." etc.) into p.cur.
// The caller has already matched the two-byte 'RB'/'rb'/... prefix.
func (p *parser) consumeRawBytes() {
	// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals

	p.s = p.s[2:] // consume 'RB'
	delim := p.stringDelimiter()
	if p.cur.err != nil {
		return
	}

	p.cur.string, p.cur.err = p.consumeStringContent(delim, true, false, "raw bytes literal")
	p.cur.typ = bytesToken
}
+
// stringDelimiter returns the opening string delimiter: a single or triple
// quote (double or single quoted). It records a parse error for anything else.
// NOTE(review): indexes p.s[0] without a length check — callers appear to
// only invoke this after seeing a quote byte; confirm p.s is never empty here.
func (p *parser) stringDelimiter() string {
	c := p.s[0]
	if c != '"' && c != '\'' {
		p.errorf("invalid string literal")
		return ""
	}
	// Look for triple.
	if len(p.s) >= 3 && p.s[1] == c && p.s[2] == c {
		return p.s[:3]
	}
	return p.s[:1]
}
+
// consumeStringContent consumes a string-like literal, including its delimiters.
//
// - delim is the opening/closing delimiter.
// - raw is true if consuming a raw string.
// - unicode is true if unicode escape sequence (\uXXXX or \UXXXXXXXX) are permitted.
// - name identifies the name of the consuming token.
//
// It is designed for consuming string, bytes literals, and also backquoted identifiers.
// On success, p.s and p.offset are advanced past the closing delimiter and the
// decoded content (without delimiters) is returned.
func (p *parser) consumeStringContent(delim string, raw, unicode bool, name string) (string, *parseError) {
	// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals

	if len(delim) == 3 {
		name = "triple-quoted " + name
	}

	i := len(delim) // index into p.s, just past the opening delimiter
	var content []byte

	for i < len(p.s) {
		if strings.HasPrefix(p.s[i:], delim) {
			// Found the closing delimiter; consume through it and finish.
			i += len(delim)
			p.s = p.s[i:]
			p.offset += i
			return string(content), nil
		}

		if p.s[i] == '\\' {
			i++
			if i >= len(p.s) {
				return "", p.errorf("unclosed %s", name)
			}

			if raw {
				// Raw literals keep escape sequences verbatim.
				content = append(content, '\\', p.s[i])
				i++
				continue
			}

			switch p.s[i] {
			case 'a':
				i++
				content = append(content, '\a')
			case 'b':
				i++
				content = append(content, '\b')
			case 'f':
				i++
				content = append(content, '\f')
			case 'n':
				i++
				content = append(content, '\n')
			case 'r':
				i++
				content = append(content, '\r')
			case 't':
				i++
				content = append(content, '\t')
			case 'v':
				i++
				content = append(content, '\v')
			case '\\':
				i++
				content = append(content, '\\')
			case '?':
				i++
				content = append(content, '?')
			case '"':
				i++
				content = append(content, '"')
			case '\'':
				i++
				content = append(content, '\'')
			case '`':
				i++
				content = append(content, '`')
			case 'x', 'X':
				// \xHH: exactly two hex digits, yielding one byte.
				i++
				if !(i+1 < len(p.s) && isHexDigit(p.s[i]) && isHexDigit(p.s[i+1])) {
					return "", p.errorf("illegal escape sequence: hex escape sequence must be followed by 2 hex digits")
				}
				c, err := strconv.ParseUint(p.s[i:i+2], 16, 8)
				if err != nil {
					return "", p.errorf("illegal escape sequence: invalid hex digits: %q: %v", p.s[i:i+2], err)
				}
				content = append(content, byte(c))
				i += 2
			case 'u', 'U':
				// \uXXXX or \UXXXXXXXX: a Unicode code point, UTF-8 encoded.
				t := p.s[i]
				if !unicode {
					return "", p.errorf("illegal escape sequence: \\%c", t)
				}

				i++
				size := 4
				if t == 'U' {
					size = 8
				}
				if i+size-1 >= len(p.s) {
					return "", p.errorf("illegal escape sequence: \\%c escape sequence must be followed by %d hex digits", t, size)
				}
				for j := 0; j < size; j++ {
					if !isHexDigit(p.s[i+j]) {
						return "", p.errorf("illegal escape sequence: \\%c escape sequence must be followed by %d hex digits", t, size)
					}
				}
				c, err := strconv.ParseUint(p.s[i:i+size], 16, 64)
				if err != nil {
					return "", p.errorf("illegal escape sequence: invalid \\%c digits: %q: %v", t, p.s[i:i+size], err)
				}
				// Reject surrogate halves and out-of-range code points.
				if 0xD800 <= c && c <= 0xDFFF || 0x10FFFF < c {
					return "", p.errorf("illegal escape sequence: invalid codepoint: %x", c)
				}
				var buf [utf8.UTFMax]byte
				n := utf8.EncodeRune(buf[:], rune(c))
				content = append(content, buf[:n]...)
				i += size
			case '0', '1', '2', '3', '4', '5', '6', '7':
				// \OOO: exactly three octal digits, yielding one byte.
				if !(i+2 < len(p.s) && isOctalDigit(p.s[i+1]) && isOctalDigit(p.s[i+2])) {
					return "", p.errorf("illegal escape sequence: octal escape sequence must be followed by 3 octal digits")
				}
				c, err := strconv.ParseUint(p.s[i:i+3], 8, 64)
				if err != nil {
					return "", p.errorf("illegal escape sequence: invalid octal digits: %q: %v", p.s[i:i+3], err)
				}
				if c >= 256 {
					return "", p.errorf("illegal escape sequence: octal digits overflow: %q (%d)", p.s[i:i+3], c)
				}
				content = append(content, byte(c))
				i += 3
			default:
				return "", p.errorf("illegal escape sequence: \\%c", p.s[i])
			}

			continue
		}

		if p.s[i] == '\n' {
			if len(delim) != 3 { // newline is only allowed inside triple-quoted.
				return "", p.errorf("newline forbidden in %s", name)
			}
			p.line++
		}

		content = append(content, p.s[i])
		i++
	}

	return "", p.errorf("unclosed %s", name)
}
+
// operators is the set of all one- and two-byte operator tokens.
// advance tries the longest (two-byte) match first.
var operators = map[string]bool{
	// Arithmetic operators.
	"-":  true, // both unary and binary
	"~":  true,
	"*":  true,
	"/":  true,
	"||": true,
	"+":  true,
	"<<": true,
	">>": true,
	"&":  true,
	"^":  true,
	"|":  true,

	// Comparison operators.
	"<":  true,
	"<=": true,
	">":  true,
	">=": true,
	"=":  true,
	"!=": true,
	"<>": true,
}
+
// isSpace reports whether c is a whitespace byte.
// Per https://cloud.google.com/spanner/docs/lexical, informally,
// whitespace is defined as "space, backspace, tab, newline".
func isSpace(c byte) bool {
	return c == ' ' || c == '\b' || c == '\t' || c == '\n'
}
+
// skipSpace skips past any space or comments, updating p.s, p.offset and
// p.line, and accumulating any comments into p.comments. It reports whether
// anything was skipped, and sets p.done when the input is exhausted.
func (p *parser) skipSpace() bool {
	initLine := p.line
	// If we start capturing a comment in this method,
	// this is set to its comment value. Multi-line comments
	// are only joined during a single skipSpace invocation.
	var com *comment

	i := 0
	for i < len(p.s) {
		if isSpace(p.s[i]) {
			if p.s[i] == '\n' {
				p.line++
			}
			i++
			continue
		}
		// Comments.
		marker, term := "", ""
		if p.s[i] == '#' {
			marker, term = "#", "\n"
		} else if i+1 < len(p.s) && p.s[i] == '-' && p.s[i+1] == '-' {
			marker, term = "--", "\n"
		} else if i+1 < len(p.s) && p.s[i] == '/' && p.s[i+1] == '*' {
			marker, term = "/*", "*/"
		}
		if term == "" {
			// Neither space nor a comment: tokenizing resumes here.
			break
		}
		// Search for the terminator, starting after the marker.
		ti := strings.Index(p.s[i+len(marker):], term)
		if ti < 0 {
			p.errorf("unterminated comment")
			return false
		}
		ti += len(marker) // make ti relative to p.s[i:]
		if com != nil && (com.end.Line+1 < p.line || com.marker != marker) {
			// There's a previous comment, but there's an
			// intervening blank line, or the marker changed.
			// Terminate the previous comment.
			com = nil
		}
		if com == nil {
			// New comment.
			p.comments = append(p.comments, comment{
				marker:   marker,
				isolated: (p.line != initLine) || p.line == 1,
				start: Position{
					Line:   p.line,
					Offset: p.offset + i,
				},
			})
			com = &p.comments[len(p.comments)-1]
		}
		textLines := strings.Split(p.s[i+len(marker):i+ti], "\n")
		com.text = append(com.text, textLines...)
		com.end = Position{
			Line:   p.line + len(textLines) - 1,
			Offset: p.offset + i + ti,
		}
		p.line = com.end.Line
		if term == "\n" {
			// The terminating newline is consumed as part of the comment.
			p.line++
		}
		i += ti + len(term)

		// A non-isolated comment is always complete and doesn't get
		// combined with any future comment.
		if !com.isolated {
			com = nil
		}
	}
	p.s = p.s[i:]
	p.offset += i
	if p.s == "" {
		p.done = true
	}
	return i > 0
}
+
// advance moves the parser to the next token, which will be available in p.cur.
func (p *parser) advance() {
	// Remember whether the previous token was an identifier before it is
	// overwritten below; this disambiguates "." handling.
	prevID := p.cur.typ == quotedID || p.cur.typ == unquotedID

	p.skipSpace()
	if p.done {
		return
	}

	// If the previous token was an identifier (quoted or unquoted),
	// the next token being a dot means this is a path expression (not a number).
	if prevID && p.s[0] == '.' {
		p.cur.err = nil
		p.cur.line, p.cur.offset = p.line, p.offset
		p.cur.typ = unknownToken
		p.cur.value, p.s = p.s[:1], p.s[1:]
		p.offset++
		return
	}

	p.cur.err = nil
	p.cur.line, p.cur.offset = p.line, p.offset
	p.cur.typ = unknownToken
	// TODO: struct literals
	switch p.s[0] {
	case ',', ';', '(', ')', '{', '}', '[', ']', '*', '+', '-':
		// Single character symbol.
		p.cur.value, p.s = p.s[:1], p.s[1:]
		p.offset++
		return
	// String literal prefix.
	case 'B', 'b', 'R', 'r', '"', '\'':
		// "B", "b", "BR", "Rb" etc are valid string literal prefix, however "BB", "rR" etc are not.
		raw, bytes := false, false
		for i := 0; i < 4 && i < len(p.s); i++ {
			switch {
			case !raw && (p.s[i] == 'R' || p.s[i] == 'r'):
				raw = true
				continue
			case !bytes && (p.s[i] == 'B' || p.s[i] == 'b'):
				bytes = true
				continue
			case p.s[i] == '"' || p.s[i] == '\'':
				// Quote found: dispatch on the prefix flags collected so far.
				switch {
				case raw && bytes:
					p.consumeRawBytes()
				case raw:
					p.consumeRawString()
				case bytes:
					p.consumeBytes()
				default:
					p.consumeString()
				}
				return
			}
			// Not a quote or a new prefix letter: fall through to the
			// identifier/number/operator handling below.
			break
		}
	case '`':
		// Quoted identifier.
		p.cur.string, p.cur.err = p.consumeStringContent("`", false, true, "quoted identifier")
		p.cur.typ = quotedID
		return
	}
	if p.s[0] == '@' || isInitialIdentifierChar(p.s[0]) {
		// Start consuming identifier.
		i := 1
		for i < len(p.s) && isIdentifierChar(p.s[i]) {
			i++
		}
		p.cur.value, p.s = p.s[:i], p.s[i:]
		p.cur.typ = unquotedID
		p.offset += i
		return
	}
	if len(p.s) >= 2 && p.s[0] == '.' && ('0' <= p.s[1] && p.s[1] <= '9') {
		// dot followed by a digit.
		p.consumeNumber()
		return
	}
	if '0' <= p.s[0] && p.s[0] <= '9' {
		p.consumeNumber()
		return
	}

	// Look for operator (two or one bytes).
	// Longest match first; i <= len(p.s) guards the slice when only one byte remains.
	for i := 2; i >= 1; i-- {
		if i <= len(p.s) && operators[p.s[:i]] {
			p.cur.value, p.s = p.s[:i], p.s[i:]
			p.offset += i
			return
		}
	}

	p.errorf("unexpected byte %#x", p.s[0])
}
+
// back steps the parser back one token. It cannot be called twice in succession.
func (p *parser) back() {
	if p.backed {
		panic("parser backed up twice")
	}
	// Clear done so the backed-up token will be handed out again by next().
	p.done = false
	p.backed = true
	// If an error was being recovered, we wish to ignore the error.
	// Don't do that for eof since that'll be returned next.
	if p.cur.err != eof {
		p.cur.err = nil
	}
}
+
// next returns the next token.
func (p *parser) next() *token {
	if p.backed || p.done {
		p.backed = false
		return &p.cur
	}
	p.advance()
	if p.done && p.cur.err == nil {
		// Input exhausted cleanly; surface it as an eof token.
		p.cur.value = ""
		p.cur.err = eof
	}
	debugf("parser·next(): returning [%v] [err: %v] @l%d,o%d", p.cur.value, p.cur.err, p.cur.line, p.cur.offset)
	return &p.cur
}
+
+// caseEqual reports whether the token is valid, not a quoted identifier, and
+// equal to the provided string under a case insensitive comparison.
+// Use this (or sniff/eat/expect) instead of comparing a string directly for keywords, etc.
+func (t *token) caseEqual(x string) bool {
+ return t.err == nil && t.typ != quotedID && strings.EqualFold(t.value, x)
+}
+
// sniff reports whether the next N tokens are as specified.
func (p *parser) sniff(want ...string) bool {
	// Store current parser state and restore on the way out.
	// NOTE(review): restoring *p also rewinds the comments slice header,
	// so comments encountered during a sniff are dropped — confirm intended.
	orig := *p
	defer func() { *p = orig }()

	for _, w := range want {
		if !p.next().caseEqual(w) {
			return false
		}
	}
	return true
}
+
+// sniffTokenType reports whether the next token type is as specified.
+func (p *parser) sniffTokenType(want tokenType) bool {
+ orig := *p
+ defer func() { *p = orig }()
+
+ if p.next().typ == want {
+ return true
+ }
+ return false
+}
+
// eat reports whether the next N tokens are as specified,
// then consumes them.
// On a mismatch nothing is consumed.
func (p *parser) eat(want ...string) bool {
	// Store current parser state so we can restore if we get a failure.
	orig := *p

	for _, w := range want {
		if !p.next().caseEqual(w) {
			// Mismatch.
			*p = orig
			return false
		}
	}
	return true
}
+
// expect consumes the next N tokens, which must match the given strings
// (case-insensitively); otherwise it returns a parse error.
func (p *parser) expect(want ...string) *parseError {
	for _, w := range want {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if !tok.caseEqual(w) {
			return p.errorf("got %q while expecting %q", tok.value, w)
		}
	}
	return nil
}
+
+func (p *parser) parseDDLStmt() (DDLStmt, *parseError) {
+ debugf("parseDDLStmt: %v", p)
+
+ /*
+ statement:
+ { create_database | create_table | create_index | alter_table | drop_table | rename_table | drop_index | create_change_stream | alter_change_stream | drop_change_stream }
+ */
+
+ // TODO: support create_database
+
+ if p.sniff("CREATE", "TABLE") {
+ ct, err := p.parseCreateTable()
+ return ct, err
+ } else if p.sniff("CREATE", "INDEX") || p.sniff("CREATE", "UNIQUE", "INDEX") || p.sniff("CREATE", "NULL_FILTERED", "INDEX") || p.sniff("CREATE", "UNIQUE", "NULL_FILTERED", "INDEX") {
+ ci, err := p.parseCreateIndex()
+ return ci, err
+ } else if p.sniff("CREATE", "VIEW") || p.sniff("CREATE", "OR", "REPLACE", "VIEW") {
+ cv, err := p.parseCreateView()
+ return cv, err
+ } else if p.sniff("CREATE", "ROLE") {
+ cr, err := p.parseCreateRole()
+ return cr, err
+ } else if p.sniff("ALTER", "TABLE") {
+ a, err := p.parseAlterTable()
+ return a, err
+ } else if p.eat("DROP") {
+ pos := p.Pos()
+ // These statements are simple.
+ // DROP TABLE [ IF EXISTS ] table_name
+ // DROP INDEX [ IF EXISTS ] index_name
+ // DROP VIEW view_name
+ // DROP ROLE role_name
+ // DROP CHANGE STREAM change_stream_name
+ tok := p.next()
+ if tok.err != nil {
+ return nil, tok.err
+ }
+ switch {
+ default:
+ return nil, p.errorf("got %q, want TABLE, VIEW, INDEX or CHANGE", tok.value)
+ case tok.caseEqual("TABLE"):
+ var ifExists bool
+ if p.eat("IF", "EXISTS") {
+ ifExists = true
+ }
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ return &DropTable{Name: name, IfExists: ifExists, Position: pos}, nil
+ case tok.caseEqual("INDEX"):
+ var ifExists bool
+ if p.eat("IF", "EXISTS") {
+ ifExists = true
+ }
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ return &DropIndex{Name: name, IfExists: ifExists, Position: pos}, nil
+ case tok.caseEqual("VIEW"):
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ return &DropView{Name: name, Position: pos}, nil
+ case tok.caseEqual("ROLE"):
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ return &DropRole{Name: name, Position: pos}, nil
+ case tok.caseEqual("CHANGE"):
+ if err := p.expect("STREAM"); err != nil {
+ return nil, err
+ }
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ return &DropChangeStream{Name: name, Position: pos}, nil
+ case tok.caseEqual("SEQUENCE"):
+ var ifExists bool
+ if p.eat("IF", "EXISTS") {
+ ifExists = true
+ }
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ return &DropSequence{Name: name, IfExists: ifExists, Position: pos}, nil
+ }
+ } else if p.sniff("RENAME", "TABLE") {
+ a, err := p.parseRenameTable()
+ return a, err
+ } else if p.sniff("ALTER", "DATABASE") {
+ a, err := p.parseAlterDatabase()
+ return a, err
+ } else if p.eat("GRANT") {
+ a, err := p.parseGrantRole()
+ return a, err
+ } else if p.eat("REVOKE") {
+ a, err := p.parseRevokeRole()
+ return a, err
+ } else if p.sniff("CREATE", "CHANGE", "STREAM") {
+ cs, err := p.parseCreateChangeStream()
+ return cs, err
+ } else if p.sniff("ALTER", "CHANGE", "STREAM") {
+ acs, err := p.parseAlterChangeStream()
+ return acs, err
+ } else if p.sniff("ALTER", "STATISTICS") {
+ as, err := p.parseAlterStatistics()
+ return as, err
+ } else if p.sniff("ALTER", "INDEX") {
+ ai, err := p.parseAlterIndex()
+ return ai, err
+ } else if p.sniff("CREATE", "SEQUENCE") {
+ cs, err := p.parseCreateSequence()
+ return cs, err
+ } else if p.sniff("ALTER", "SEQUENCE") {
+ as, err := p.parseAlterSequence()
+ return as, err
+ }
+
+ return nil, p.errorf("unknown DDL statement")
+}
+
// parseCreateTable parses a complete CREATE TABLE statement, including
// columns, constraints, synonym, primary key, interleaving and any row
// deletion policy.
func (p *parser) parseCreateTable() (*CreateTable, *parseError) {
	debugf("parseCreateTable: %v", p)

	/*
		CREATE TABLE [ IF NOT EXISTS ] table_name(
			[column_def, ...] [ table_constraint, ...] [ synonym ] )
			primary_key [, cluster]

		synonym:
			SYNONYM (name)

		primary_key:
			PRIMARY KEY ( [key_part, ...] )

		cluster:
			INTERLEAVE IN PARENT table_name [ ON DELETE { CASCADE | NO ACTION } ]
	*/
	var ifNotExists bool

	if err := p.expect("CREATE"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("TABLE"); err != nil {
		return nil, err
	}
	if p.eat("IF", "NOT", "EXISTS") {
		ifNotExists = true
	}
	tname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}

	ct := &CreateTable{Name: tname, Position: pos, IfNotExists: ifNotExists}
	// The parenthesized body mixes column definitions, table constraints and
	// an optional synonym; sniff to tell them apart per element.
	err = p.parseCommaList("(", ")", func(p *parser) *parseError {
		if p.sniffTableConstraint() {
			tc, err := p.parseTableConstraint()
			if err != nil {
				return err
			}
			ct.Constraints = append(ct.Constraints, tc)
			return nil
		}

		if p.sniffTableSynonym() {
			ts, err := p.parseTableSynonym()
			if err != nil {
				return err
			}
			ct.Synonym = ts
			return nil
		}

		cd, err := p.parseColumnDef()
		if err != nil {
			return err
		}
		ct.Columns = append(ct.Columns, cd)
		return nil
	})
	if err != nil {
		return nil, err
	}

	if err := p.expect("PRIMARY"); err != nil {
		return nil, err
	}
	if err := p.expect("KEY"); err != nil {
		return nil, err
	}
	ct.PrimaryKey, err = p.parseKeyPartList()
	if err != nil {
		return nil, err
	}

	if p.eat(",", "INTERLEAVE") {
		if err := p.expect("IN"); err != nil {
			return nil, err
		}
		if err := p.expect("PARENT"); err != nil {
			return nil, err
		}
		pname, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
		ct.Interleave = &Interleave{
			Parent:   pname,
			OnDelete: NoActionOnDelete,
		}
		// The ON DELETE clause is optional; it defaults to NoActionOnDelete.
		if p.eat("ON", "DELETE") {
			od, err := p.parseOnDelete()
			if err != nil {
				return nil, err
			}
			ct.Interleave.OnDelete = od
		}
	}
	if p.eat(",", "ROW", "DELETION", "POLICY") {
		rdp, err := p.parseRowDeletionPolicy()
		if err != nil {
			return nil, err
		}
		ct.RowDeletionPolicy = &rdp
	}

	return ct, nil
}
+
// sniffTableConstraint reports whether the upcoming tokens begin a table
// constraint rather than a column definition, without consuming input.
func (p *parser) sniffTableConstraint() bool {
	// Unfortunately the Cloud Spanner grammar is LL(3) because
	//	CONSTRAINT BOOL
	// could be the start of a declaration of a column called "CONSTRAINT" of boolean type,
	// or it could be the start of a foreign key constraint called "BOOL".
	// We have to sniff up to the third token to see what production it is.
	// If we have "FOREIGN" and "KEY" (or "CHECK"), this is an unnamed table constraint.
	// If we have "CONSTRAINT", an identifier and "FOREIGN" (or "CHECK"), this is a table constraint.
	// Otherwise, this is a column definition.

	if p.sniff("FOREIGN", "KEY") || p.sniff("CHECK") {
		return true
	}

	// Store parser state, and peek ahead.
	// Restore on the way out.
	orig := *p
	defer func() { *p = orig }()

	if !p.eat("CONSTRAINT") {
		return false
	}
	if _, err := p.parseTableOrIndexOrColumnName(); err != nil {
		return false
	}
	return p.sniff("FOREIGN") || p.sniff("CHECK")
}
+
// sniffTableSynonym reports whether the next token starts a SYNONYM clause,
// without consuming input.
func (p *parser) sniffTableSynonym() bool {
	return p.sniff("SYNONYM")
}
+
// parseTableSynonym parses a SYNONYM ( name ) clause and returns the
// synonym name.
func (p *parser) parseTableSynonym() (ID, *parseError) {
	debugf("parseTableSynonym: %v", p)

	/*
		table_synonym:
			SYNONYM ( name )
	*/

	if err := p.expect("SYNONYM"); err != nil {
		return "", err
	}
	if err := p.expect("("); err != nil {
		return "", err
	}
	name, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return "", err
	}
	if err := p.expect(")"); err != nil {
		return "", err
	}

	return name, nil
}
+
// parseCreateIndex parses a complete CREATE INDEX statement, including the
// UNIQUE/NULL_FILTERED modifiers and optional STORING and INTERLEAVE clauses.
func (p *parser) parseCreateIndex() (*CreateIndex, *parseError) {
	debugf("parseCreateIndex: %v", p)

	/*
		CREATE [UNIQUE] [NULL_FILTERED] INDEX [IF NOT EXISTS] index_name
			ON table_name ( key_part [, ...] ) [ storing_clause ] [ , interleave_clause ]

		index_name:
			{a—z|A—Z}[{a—z|A—Z|0—9|_}+]

		storing_clause:
			STORING ( column_name [, ...] )

		interleave_clause:
			INTERLEAVE IN table_name
	*/

	var unique, nullFiltered, ifNotExists bool

	if err := p.expect("CREATE"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if p.eat("UNIQUE") {
		unique = true
	}
	if p.eat("NULL_FILTERED") {
		nullFiltered = true
	}
	if err := p.expect("INDEX"); err != nil {
		return nil, err
	}
	if p.eat("IF", "NOT", "EXISTS") {
		ifNotExists = true
	}
	iname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	if err := p.expect("ON"); err != nil {
		return nil, err
	}
	tname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	ci := &CreateIndex{
		Name:  iname,
		Table: tname,

		Unique:       unique,
		NullFiltered: nullFiltered,
		IfNotExists:  ifNotExists,

		Position: pos,
	}
	ci.Columns, err = p.parseKeyPartList()
	if err != nil {
		return nil, err
	}

	if p.eat("STORING") {
		ci.Storing, err = p.parseColumnNameList()
		if err != nil {
			return nil, err
		}
	}

	if p.eat(",", "INTERLEAVE", "IN") {
		ci.Interleave, err = p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
	}

	return ci, nil
}
+
// parseCreateView parses a CREATE [OR REPLACE] VIEW statement, including the
// mandatory SQL SECURITY clause and the defining query.
func (p *parser) parseCreateView() (*CreateView, *parseError) {
	debugf("parseCreateView: %v", p)

	/*
		{ CREATE VIEW | CREATE OR REPLACE VIEW } view_name
			SQL SECURITY {INVOKER | DEFINER}
			AS query
	*/

	var orReplace bool

	if err := p.expect("CREATE"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if p.eat("OR", "REPLACE") {
		orReplace = true
	}
	if err := p.expect("VIEW"); err != nil {
		return nil, err
	}
	vname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	if err := p.expect("SQL", "SECURITY"); err != nil {
		return nil, err
	}
	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}
	var securityType SecurityType
	switch {
	case tok.caseEqual("INVOKER"):
		securityType = Invoker
	case tok.caseEqual("DEFINER"):
		securityType = Definer
	default:
		return nil, p.errorf("got %q, want INVOKER or DEFINER", tok.value)
	}
	if err := p.expect("AS"); err != nil {
		return nil, err
	}
	query, err := p.parseQuery()
	if err != nil {
		return nil, err
	}

	return &CreateView{
		Name:         vname,
		OrReplace:    orReplace,
		SecurityType: securityType,
		Query:        query,

		Position: pos,
	}, nil
}
+
// parseCreateRole parses a CREATE ROLE statement.
func (p *parser) parseCreateRole() (*CreateRole, *parseError) {
	debugf("parseCreateRole: %v", p)

	/*
		CREATE ROLE database_role_name
	*/

	if err := p.expect("CREATE"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("ROLE"); err != nil {
		return nil, err
	}
	rname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	cr := &CreateRole{
		Name: rname,

		Position: pos,
	}

	return cr, nil
}
+
// parseGrantRole parses the body of a GRANT statement: role membership,
// EXECUTE ON TABLE FUNCTION, SELECT ON VIEW, SELECT ON CHANGE STREAM, or
// table privileges, followed by the TO ROLE grantee list.
// The leading GRANT keyword has already been consumed by the caller.
func (p *parser) parseGrantRole() (*GrantRole, *parseError) {
	pos := p.Pos()
	g := &GrantRole{
		Position: pos,
	}
	if p.eat("ROLE") {
		roleList, err := p.parseGrantOrRevokeRoleList("TO")
		if err != nil {
			return nil, err
		}
		g.GrantRoleNames = roleList
	} else if p.eat("EXECUTE", "ON", "TABLE", "FUNCTION") {
		tvfList, err := p.parseGrantOrRevokeRoleList("TO")
		if err != nil {
			return nil, err
		}
		g.TvfNames = tvfList
	} else if p.eat("SELECT", "ON", "VIEW") {
		viewList, err := p.parseGrantOrRevokeRoleList("TO")
		if err != nil {
			return nil, err
		}
		g.ViewNames = viewList
	} else if p.eat("SELECT", "ON", "CHANGE", "STREAM") {
		csList, err := p.parseGrantOrRevokeRoleList("TO")
		if err != nil {
			return nil, err
		}
		g.ChangeStreamNames = csList
	} else {
		// Table-privilege form: privilege list, then table list ending at "TO ROLE".
		var privs []Privilege
		privs, err := p.parsePrivileges()
		if err != nil {
			return nil, err
		}
		g.Privileges = privs
		var tableList []ID
		f := func(p *parser) *parseError {
			table, err := p.parseTableOrIndexOrColumnName()
			if err != nil {
				return err
			}
			tableList = append(tableList, table)
			return nil
		}
		if err := p.parseCommaListWithEnds(f, "TO", "ROLE"); err != nil {
			return nil, err
		}
		g.TableNames = tableList
	}
	// Grantee role names follow in every form.
	list, err := p.parseIDList()
	if err != nil {
		return nil, err
	}
	g.ToRoleNames = list

	return g, nil
}
+
// parseRevokeRole parses the body of a REVOKE statement; it mirrors
// parseGrantRole but ends each list with FROM ROLE instead of TO ROLE.
// The leading REVOKE keyword has already been consumed by the caller.
func (p *parser) parseRevokeRole() (*RevokeRole, *parseError) {
	pos := p.Pos()
	r := &RevokeRole{
		Position: pos,
	}
	if p.eat("ROLE") {
		roleList, err := p.parseGrantOrRevokeRoleList("FROM")
		if err != nil {
			return nil, err
		}
		r.RevokeRoleNames = roleList
	} else if p.eat("EXECUTE", "ON", "TABLE", "FUNCTION") {
		tvfList, err := p.parseGrantOrRevokeRoleList("FROM")
		if err != nil {
			return nil, err
		}
		r.TvfNames = tvfList
	} else if p.eat("SELECT", "ON", "VIEW") {
		viewList, err := p.parseGrantOrRevokeRoleList("FROM")
		if err != nil {
			return nil, err
		}
		r.ViewNames = viewList
	} else if p.eat("SELECT", "ON", "CHANGE", "STREAM") {
		csList, err := p.parseGrantOrRevokeRoleList("FROM")
		if err != nil {
			return nil, err
		}
		r.ChangeStreamNames = csList
	} else {
		// Table-privilege form: privilege list, then table list ending at "FROM ROLE".
		var privs []Privilege
		privs, err := p.parsePrivileges()
		if err != nil {
			return nil, err
		}
		r.Privileges = privs
		var tableList []ID
		f := func(p *parser) *parseError {
			table, err := p.parseTableOrIndexOrColumnName()
			if err != nil {
				return err
			}
			tableList = append(tableList, table)
			return nil
		}
		if err := p.parseCommaListWithEnds(f, "FROM", "ROLE"); err != nil {
			return nil, err
		}
		r.TableNames = tableList
	}
	// The role names the grant is revoked from follow in every form.
	list, err := p.parseIDList()
	if err != nil {
		return nil, err
	}
	r.FromRoleNames = list

	return r, nil
}
// parseGrantOrRevokeRoleList parses a comma-separated list of names
// terminated by `end` followed by ROLE, where end is "TO" for GRANT
// statements and "FROM" for REVOKE statements.
func (p *parser) parseGrantOrRevokeRoleList(end string) ([]ID, *parseError) {
	var roleList []ID
	f := func(p *parser) *parseError {
		role, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return err
		}
		roleList = append(roleList, role)
		return nil
	}
	err := p.parseCommaListWithEnds(f, end, "ROLE")
	if err != nil {
		return nil, err
	}
	return roleList, nil
}
+
// parsePrivileges parses the privilege list of a GRANT/REVOKE statement
// (SELECT/UPDATE/INSERT/DELETE, each with an optional column list), and
// consumes the trailing "ON TABLE" that terminates the list.
func (p *parser) parsePrivileges() ([]Privilege, *parseError) {
	var privs []Privilege
	for {
		tok := p.next()
		if tok.err != nil {
			return []Privilege{}, tok.err
		}

		priv := Privilege{}
		switch {
		default:
			return []Privilege{}, p.errorf("got %q, want SELECT or UPDATE or INSERT or DELETE", tok.value)
		case tok.caseEqual("SELECT"):
			priv.Type = PrivilegeTypeSelect
		case tok.caseEqual("UPDATE"):
			priv.Type = PrivilegeTypeUpdate
		case tok.caseEqual("INSERT"):
			priv.Type = PrivilegeTypeInsert
		case tok.caseEqual("DELETE"):
			priv.Type = PrivilegeTypeDelete
		}
		// can grant DELETE only at the table level.
		// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#notes_and_restrictions
		if p.sniff("(") && !tok.caseEqual("DELETE") {
			list, err := p.parseColumnNameList()
			if err != nil {
				return nil, err
			}
			priv.Columns = list
		}
		privs = append(privs, priv)
		tok = p.next()
		if tok.err != nil {
			return []Privilege{}, tok.err
		}
		if tok.value == "," {
			continue
		} else if tok.caseEqual("ON") && p.eat("TABLE") {
			break
		} else {
			return []Privilege{}, p.errorf("got %q, want , or ON TABLE", tok.value)
		}
	}
	return privs, nil
}
+func (p *parser) parseAlterTable() (*AlterTable, *parseError) {
+ debugf("parseAlterTable: %v", p)
+
+ /*
+ alter_table:
+ ALTER TABLE table_name { table_alteration | table_column_alteration }
+
+ table_alteration:
+ { ADD [ COLUMN ] [ IF NOT EXISTS ] column_def
+ | DROP [ COLUMN ] column_name
+ | ADD table_constraint
+ | DROP CONSTRAINT constraint_name
+ | SET ON DELETE { CASCADE | NO ACTION }
+ | ADD SYNONYM synonym_name
+ | DROP SYNONYM synonym_name
+ | RENAME TO new_table_name }
+
+ table_column_alteration:
+ ALTER [ COLUMN ] column_name { { scalar_type | array_type } [NOT NULL] | SET options_def }
+ */
+
+ if err := p.expect("ALTER"); err != nil {
+ return nil, err
+ }
+ pos := p.Pos()
+ if err := p.expect("TABLE"); err != nil {
+ return nil, err
+ }
+ tname, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a := &AlterTable{Name: tname, Position: pos}
+
+ tok := p.next()
+ if tok.err != nil {
+ return nil, tok.err
+ }
+ switch {
+ default:
+ return nil, p.errorf("got %q, expected ADD or DROP or SET or ALTER", tok.value)
+ case tok.caseEqual("ADD"):
+ if p.sniff("CONSTRAINT") || p.sniff("FOREIGN") || p.sniff("CHECK") {
+ tc, err := p.parseTableConstraint()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = AddConstraint{Constraint: tc}
+ return a, nil
+ }
+
+ if p.eat("ROW", "DELETION", "POLICY") {
+ rdp, err := p.parseRowDeletionPolicy()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = AddRowDeletionPolicy{RowDeletionPolicy: rdp}
+ return a, nil
+ }
+
+ // TODO: "COLUMN" is optional. A column named SYNONYM is allowed.
+ if p.eat("SYNONYM") {
+ synonym, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = AddSynonym{Name: synonym}
+ return a, nil
+ }
+
+ // TODO: "COLUMN" is optional.
+ if err := p.expect("COLUMN"); err != nil {
+ return nil, err
+ }
+ var ifNotExists bool
+ if p.eat("IF", "NOT", "EXISTS") {
+ ifNotExists = true
+ }
+ cd, err := p.parseColumnDef()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = AddColumn{Def: cd, IfNotExists: ifNotExists}
+ return a, nil
+ case tok.caseEqual("DROP"):
+ if p.eat("CONSTRAINT") {
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = DropConstraint{Name: name}
+ return a, nil
+ }
+
+ if p.eat("ROW", "DELETION", "POLICY") {
+ a.Alteration = DropRowDeletionPolicy{}
+ return a, nil
+ }
+
+ // TODO: "COLUMN" is optional. A column named SYNONYM is allowed.
+ if p.eat("SYNONYM") {
+ synonym, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = DropSynonym{Name: synonym}
+ return a, nil
+ }
+
+ // TODO: "COLUMN" is optional.
+ if err := p.expect("COLUMN"); err != nil {
+ return nil, err
+ }
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = DropColumn{Name: name}
+ return a, nil
+ case tok.caseEqual("SET"):
+ if err := p.expect("ON"); err != nil {
+ return nil, err
+ }
+ if err := p.expect("DELETE"); err != nil {
+ return nil, err
+ }
+ od, err := p.parseOnDelete()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = SetOnDelete{Action: od}
+ return a, nil
+ case tok.caseEqual("ALTER"):
+ // TODO: "COLUMN" is optional.
+ if err := p.expect("COLUMN"); err != nil {
+ return nil, err
+ }
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ ca, err := p.parseColumnAlteration()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = AlterColumn{
+ Name: name,
+ Alteration: ca,
+ }
+ return a, nil
+ case tok.caseEqual("REPLACE"):
+ if p.eat("ROW", "DELETION", "POLICY") {
+ rdp, err := p.parseRowDeletionPolicy()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = ReplaceRowDeletionPolicy{RowDeletionPolicy: rdp}
+ return a, nil
+ }
+ case tok.caseEqual("RENAME"):
+ if p.eat("TO") {
+ newName, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ rt := RenameTo{ToName: newName}
+ if p.eat(",", "ADD", "SYNONYM") {
+ synonym, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ rt.Synonym = synonym
+ }
+ a.Alteration = rt
+ return a, nil
+ }
+ }
+ return a, nil
+}
+
// parseRenameTable parses a RENAME TABLE statement, which renames one or
// more tables in a single DDL statement.
func (p *parser) parseRenameTable() (*RenameTable, *parseError) {
	debugf("parseRenameTable: %v", p)

	/*
		RENAME TABLE table_name TO new_name [, table_name2 TO new_name2, ...]
	*/

	if err := p.expect("RENAME"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("TABLE"); err != nil {
		return nil, err
	}
	rt := &RenameTable{
		Position: pos,
	}

	var renameOps []TableRenameOp
	for {
		fromName, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
		if err := p.expect("TO"); err != nil {
			return nil, err
		}
		toName, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
		renameOps = append(renameOps, TableRenameOp{FromName: fromName, ToName: toName})

		tok := p.next()
		if tok.err != nil {
			if tok.err == eof {
				// End of input is a valid statement terminator here.
				break
			}
			return nil, tok.err
		} else if tok.value == "," {
			// Another "from TO to" pair follows.
			continue
		} else if tok.value == ";" {
			break
		} else {
			return nil, p.errorf("unexpected token %q", tok.value)
		}
	}
	rt.TableRenameOps = renameOps
	return rt, nil
}
+
// parseAlterDatabase parses an ALTER DATABASE statement. The only supported
// alteration is SET OPTIONS (...).
func (p *parser) parseAlterDatabase() (*AlterDatabase, *parseError) {
	debugf("parseAlterDatabase: %v", p)

	/*
		ALTER DATABASE database_id
			action

		where database_id is:
			{a—z}[{a—z|0—9|_|-}+]{a—z|0—9}

		and action is:
			SET OPTIONS ( optimizer_version = { 1 ... 2 | null },
						  version_retention_period = { 'duration' | null } )
	*/

	if err := p.expect("ALTER"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("DATABASE"); err != nil {
		return nil, err
	}
	// This is not 100% correct as database identifiers have slightly more
	// restrictions than table names, but the restrictions are currently not
	// applied in the spansql parser.
	// TODO: Apply restrictions for all identifiers.
	dbname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	a := &AlterDatabase{Name: dbname, Position: pos}

	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}
	switch {
	default:
		return nil, p.errorf("got %q, expected SET", tok.value)
	case tok.caseEqual("SET"):
		options, err := p.parseDatabaseOptions()
		if err != nil {
			return nil, err
		}
		a.Alteration = SetDatabaseOptions{Options: options}
		return a, nil
	}
}
+
// parseDMLStmt parses a single DML statement: DELETE, UPDATE or INSERT.
// It returns the corresponding AST node, or an error if the input does not
// start with one of those keywords.
func (p *parser) parseDMLStmt() (DMLStmt, *parseError) {
	debugf("parseDMLStmt: %v", p)

	/*
		DELETE [FROM] target_name [[AS] alias]
		WHERE condition

		UPDATE target_name [[AS] alias]
		SET update_item [, ...]
		WHERE condition

		update_item: path_expression = expression | path_expression = DEFAULT

		INSERT [INTO] target_name
		 (column_name_1 [, ..., column_name_n] )
		 input

		input:
		 VALUES (row_1_column_1_expr [, ..., row_1_column_n_expr ] )
		        [, ..., (row_k_column_1_expr  [, ..., row_k_column_n_expr ] ) ]
		| select_query

		expr: value_expression | DEFAULT
	*/

	if p.eat("DELETE") {
		p.eat("FROM") // optional
		tname, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
		// TODO: parse alias.
		// WHERE is mandatory in Spanner DML.
		if err := p.expect("WHERE"); err != nil {
			return nil, err
		}
		where, err := p.parseBoolExpr()
		if err != nil {
			return nil, err
		}
		return &Delete{
			Table: tname,
			Where: where,
		}, nil
	}

	if p.eat("UPDATE") {
		tname, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
		u := &Update{
			Table: tname,
		}
		// TODO: parse alias.
		if err := p.expect("SET"); err != nil {
			return nil, err
		}
		// Parse the comma-separated SET items.
		for {
			ui, err := p.parseUpdateItem()
			if err != nil {
				return nil, err
			}
			u.Items = append(u.Items, ui)
			if p.eat(",") {
				continue
			}
			break
		}
		if err := p.expect("WHERE"); err != nil {
			return nil, err
		}
		where, err := p.parseBoolExpr()
		if err != nil {
			return nil, err
		}
		u.Where = where
		return u, nil
	}

	if p.eat("INSERT") {
		p.eat("INTO") // optional
		tname, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}

		columns, err := p.parseColumnNameList()
		if err != nil {
			return nil, err
		}

		// The input is either a VALUES list or a sub-select.
		var input ValuesOrSelect
		if p.eat("VALUES") {
			values := make([][]Expr, 0)
			for {
				exprs, err := p.parseParenExprList()
				if err != nil {
					return nil, err
				}
				values = append(values, exprs)
				if !p.eat(",") {
					break
				}
			}
			input = Values(values)
		} else {
			input, err = p.parseSelect()
			if err != nil {
				return nil, err
			}
		}

		return &Insert{
			Table:   tname,
			Columns: columns,
			Input:   input,
		}, nil
	}

	return nil, p.errorf("unknown DML statement")
}
+
+func (p *parser) parseUpdateItem() (UpdateItem, *parseError) {
+ col, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return UpdateItem{}, err
+ }
+ ui := UpdateItem{
+ Column: col,
+ }
+ if err := p.expect("="); err != nil {
+ return UpdateItem{}, err
+ }
+ if p.eat("DEFAULT") {
+ return ui, nil
+ }
+ ui.Value, err = p.parseExpr()
+ if err != nil {
+ return UpdateItem{}, err
+ }
+ return ui, nil
+}
+
// parseColumnDef parses a single column definition as used in CREATE TABLE
// and ALTER TABLE ... ADD COLUMN.
func (p *parser) parseColumnDef() (ColumnDef, *parseError) {
	debugf("parseColumnDef: %v", p)

	/*
		column_def:
			column_name {scalar_type | array_type} [NOT NULL] [{DEFAULT ( expression ) | AS ( expression ) STORED}] [options_def]
	*/

	name, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return ColumnDef{}, err
	}

	cd := ColumnDef{Name: name, Position: p.Pos()}

	cd.Type, err = p.parseType()
	if err != nil {
		return ColumnDef{}, err
	}

	if p.eat("NOT", "NULL") {
		cd.NotNull = true
	}

	// Optional DEFAULT ( expression ) clause.
	if p.eat("DEFAULT", "(") {
		cd.Default, err = p.parseExpr()
		if err != nil {
			return ColumnDef{}, err
		}
		if err := p.expect(")"); err != nil {
			return ColumnDef{}, err
		}
	}

	// Optional generated-column clause; STORED is mandatory after it.
	if p.eat("AS", "(") {
		cd.Generated, err = p.parseExpr()
		if err != nil {
			return ColumnDef{}, err
		}
		if err := p.expect(")"); err != nil {
			return ColumnDef{}, err
		}
		if err := p.expect("STORED"); err != nil {
			return ColumnDef{}, err
		}
	}

	if p.sniff("OPTIONS") {
		cd.Options, err = p.parseColumnOptions()
		if err != nil {
			return ColumnDef{}, err
		}
	}

	return cd, nil
}
+
// parseColumnAlteration parses the alteration part of
// ALTER TABLE ... ALTER COLUMN column_name <alteration>.
// NOTE: the branch order matters — "SET DEFAULT (" and "DROP DEFAULT" must
// be tried before the bare "SET" (options) form, and all three before
// falling through to a type change.
func (p *parser) parseColumnAlteration() (ColumnAlteration, *parseError) {
	debugf("parseColumnAlteration: %v", p)
	/*
		{
			data_type [ NOT NULL ] [ DEFAULT ( expression ) ]
			| SET ( options_def )
			| SET DEFAULT ( expression )
			| DROP DEFAULT
		}
	*/

	if p.eat("SET", "DEFAULT", "(") {
		d, err := p.parseExpr()
		if err != nil {
			return nil, err
		}
		if err := p.expect(")"); err != nil {
			return nil, err
		}
		return SetDefault{Default: d}, nil
	}

	if p.eat("DROP", "DEFAULT") {
		return DropDefault{}, nil
	}

	if p.eat("SET") {
		co, err := p.parseColumnOptions()
		if err != nil {
			return nil, err
		}
		return SetColumnOptions{Options: co}, nil
	}

	// Otherwise this is a type change: data_type [NOT NULL] [DEFAULT (...)].
	typ, err := p.parseType()
	if err != nil {
		return nil, err
	}
	sct := SetColumnType{Type: typ}

	if p.eat("NOT", "NULL") {
		sct.NotNull = true
	}

	if p.eat("DEFAULT", "(") {
		sct.Default, err = p.parseExpr()
		if err != nil {
			return nil, err
		}
		if err := p.expect(")"); err != nil {
			return nil, err
		}
	}

	return sct, nil
}
+
// parseColumnOptions parses a column OPTIONS clause. Only the
// allow_commit_timestamp option is recognized; its value may be true or
// null (null clears the option, represented as false here).
func (p *parser) parseColumnOptions() (ColumnOptions, *parseError) {
	debugf("parseColumnOptions: %v", p)
	/*
		options_def:
			OPTIONS (allow_commit_timestamp = { true | null })
	*/

	if err := p.expect("OPTIONS"); err != nil {
		return ColumnOptions{}, err
	}
	if err := p.expect("("); err != nil {
		return ColumnOptions{}, err
	}

	// TODO: Figure out if column options are case insensitive.
	// We ignore case for the key (because it is easier) but not the value.
	var co ColumnOptions
	if p.eat("allow_commit_timestamp", "=") {
		tok := p.next()
		if tok.err != nil {
			return ColumnOptions{}, tok.err
		}
		allowCommitTimestamp := new(bool)
		switch tok.value {
		case "true":
			*allowCommitTimestamp = true
		case "null":
			*allowCommitTimestamp = false
		default:
			return ColumnOptions{}, p.errorf("got %q, want true or null", tok.value)
		}
		co.AllowCommitTimestamp = allowCommitTimestamp
	}

	if err := p.expect(")"); err != nil {
		return ColumnOptions{}, err
	}

	return co, nil
}
+
// parseDatabaseOptions parses the OPTIONS clause of an ALTER DATABASE
// statement. For each recognized option, a "null" value resets the option
// (represented by the zero value behind a non-nil pointer).
func (p *parser) parseDatabaseOptions() (DatabaseOptions, *parseError) {
	debugf("parseDatabaseOptions: %v", p)
	/*
		options_def:
			OPTIONS (enable_key_visualizer = { true | null },
				 optimizer_version = { 1 ... 2 | null },
				 version_retention_period = { 'duration' | null })
	*/

	if err := p.expect("OPTIONS"); err != nil {
		return DatabaseOptions{}, err
	}
	if err := p.expect("("); err != nil {
		return DatabaseOptions{}, err
	}

	// We ignore case for the key (because it is easier) but not the value.
	var opts DatabaseOptions
	for {
		if p.eat("enable_key_visualizer", "=") {
			tok := p.next()
			if tok.err != nil {
				return DatabaseOptions{}, tok.err
			}
			enableKeyVisualizer := new(bool)
			switch tok.value {
			case "true":
				*enableKeyVisualizer = true
			case "null":
				*enableKeyVisualizer = false
			default:
				return DatabaseOptions{}, p.errorf("invalid enable_key_visualizer_value: %v", tok.value)
			}
			opts.EnableKeyVisualizer = enableKeyVisualizer
		} else if p.eat("optimizer_version", "=") {
			tok := p.next()
			if tok.err != nil {
				return DatabaseOptions{}, tok.err
			}
			optimizerVersion := new(int)
			if tok.value == "null" {
				*optimizerVersion = 0
			} else {
				if tok.typ != int64Token {
					return DatabaseOptions{}, p.errorf("invalid optimizer_version value: %v", tok.value)
				}
				version, err := strconv.Atoi(tok.value)
				if err != nil {
					return DatabaseOptions{}, p.errorf("invalid optimizer_version value: %v", tok.value)
				}
				*optimizerVersion = version
			}
			opts.OptimizerVersion = optimizerVersion
		} else if p.eat("optimizer_statistics_package", "=") {
			tok := p.next()
			if tok.err != nil {
				return DatabaseOptions{}, tok.err
			}
			optimizerStatisticsPackage := new(string)
			if tok.value == "null" {
				*optimizerStatisticsPackage = ""
			} else {
				if tok.typ != stringToken {
					return DatabaseOptions{}, p.errorf("invalid optimizer_statistics_package: %v", tok.value)
				}
				*optimizerStatisticsPackage = tok.string
			}
			opts.OptimizerStatisticsPackage = optimizerStatisticsPackage
		} else if p.eat("version_retention_period", "=") {
			tok := p.next()
			if tok.err != nil {
				return DatabaseOptions{}, tok.err
			}
			retentionPeriod := new(string)
			if tok.value == "null" {
				*retentionPeriod = ""
			} else {
				if tok.typ != stringToken {
					return DatabaseOptions{}, p.errorf("invalid version_retention_period: %v", tok.value)
				}
				*retentionPeriod = tok.string
			}
			opts.VersionRetentionPeriod = retentionPeriod
		} else if p.eat("default_leader", "=") {
			tok := p.next()
			if tok.err != nil {
				return DatabaseOptions{}, tok.err
			}
			defaultLeader := new(string)
			if tok.value == "null" {
				*defaultLeader = ""
			} else {
				if tok.typ != stringToken {
					return DatabaseOptions{}, p.errorf("invalid default_leader: %v", tok.value)
				}
				*defaultLeader = tok.string
			}
			opts.DefaultLeader = defaultLeader
		} else {
			tok := p.next()
			return DatabaseOptions{}, p.errorf("unknown database option: %v", tok.value)
		}
		// Loop until the closing paren; options are comma-separated.
		if p.sniff(")") {
			break
		}
		if !p.eat(",") {
			return DatabaseOptions{}, p.errorf("missing ',' in options list")
		}
	}
	if err := p.expect(")"); err != nil {
		return DatabaseOptions{}, err
	}

	return opts, nil
}
+
+func (p *parser) parseKeyPartList() ([]KeyPart, *parseError) {
+ var list []KeyPart
+ err := p.parseCommaList("(", ")", func(p *parser) *parseError {
+ kp, err := p.parseKeyPart()
+ if err != nil {
+ return err
+ }
+ list = append(list, kp)
+ return nil
+ })
+ return list, err
+}
+
+func (p *parser) parseKeyPart() (KeyPart, *parseError) {
+ debugf("parseKeyPart: %v", p)
+
+ /*
+ key_part:
+ column_name [{ ASC | DESC }]
+ */
+
+ name, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return KeyPart{}, err
+ }
+
+ kp := KeyPart{Column: name}
+
+ if p.eat("ASC") {
+ // OK.
+ } else if p.eat("DESC") {
+ kp.Desc = true
+ }
+
+ return kp, nil
+}
+
+func (p *parser) parseTableConstraint() (TableConstraint, *parseError) {
+ debugf("parseTableConstraint: %v", p)
+
+ /*
+ table_constraint:
+ [ CONSTRAINT constraint_name ]
+ { check | foreign_key }
+ */
+
+ if p.eat("CONSTRAINT") {
+ pos := p.Pos()
+ // Named constraint.
+ cname, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return TableConstraint{}, err
+ }
+ c, err := p.parseConstraint()
+ if err != nil {
+ return TableConstraint{}, err
+ }
+ return TableConstraint{
+ Name: cname,
+ Constraint: c,
+ Position: pos,
+ }, nil
+ }
+
+ // Unnamed constraint.
+ c, err := p.parseConstraint()
+ if err != nil {
+ return TableConstraint{}, err
+ }
+ return TableConstraint{
+ Constraint: c,
+ Position: c.Pos(),
+ }, nil
+}
+
+func (p *parser) parseConstraint() (Constraint, *parseError) {
+ if p.sniff("FOREIGN") {
+ fk, err := p.parseForeignKey()
+ return fk, err
+ }
+ c, err := p.parseCheck()
+ return c, err
+}
+
// parseForeignKey parses a FOREIGN KEY constraint, including the optional
// ON DELETE clause.
func (p *parser) parseForeignKey() (ForeignKey, *parseError) {
	debugf("parseForeignKey: %v", p)

	/*
		foreign_key:
			FOREIGN KEY ( column_name [, ... ] ) REFERENCES ref_table ( ref_column [, ... ] ) [ ON DELETE { CASCADE | NO ACTION } ]
	*/

	if err := p.expect("FOREIGN"); err != nil {
		return ForeignKey{}, err
	}
	fk := ForeignKey{Position: p.Pos()}
	if err := p.expect("KEY"); err != nil {
		return ForeignKey{}, err
	}
	var err *parseError
	fk.Columns, err = p.parseColumnNameList()
	if err != nil {
		return ForeignKey{}, err
	}
	if err := p.expect("REFERENCES"); err != nil {
		return ForeignKey{}, err
	}
	fk.RefTable, err = p.parseTableOrIndexOrColumnName()
	if err != nil {
		return ForeignKey{}, err
	}
	fk.RefColumns, err = p.parseColumnNameList()
	if err != nil {
		return ForeignKey{}, err
	}
	// The ON DELETE clause is optional; it defaults to NoActionOnDelete.
	fk.OnDelete = NoActionOnDelete
	if p.eat("ON", "DELETE") {
		fk.OnDelete, err = p.parseOnDelete()
		if err != nil {
			return ForeignKey{}, err
		}
	}
	return fk, nil
}
+
// parseCheck parses a CHECK constraint: CHECK ( boolean_expression ).
func (p *parser) parseCheck() (Check, *parseError) {
	debugf("parseCheck: %v", p)

	/*
		check:
			CHECK ( expression )
	*/

	if err := p.expect("CHECK"); err != nil {
		return Check{}, err
	}
	c := Check{Position: p.Pos()}
	if err := p.expect("("); err != nil {
		return Check{}, err
	}
	var err *parseError
	c.Expr, err = p.parseBoolExpr()
	if err != nil {
		return Check{}, err
	}
	if err := p.expect(")"); err != nil {
		return Check{}, err
	}
	return c, nil
}
+
+func (p *parser) parseColumnNameList() ([]ID, *parseError) {
+ var list []ID
+ err := p.parseCommaList("(", ")", func(p *parser) *parseError {
+ n, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return err
+ }
+ list = append(list, n)
+ return nil
+ })
+ return list, err
+}
+
+func (p *parser) parseIDList() ([]ID, *parseError) {
+ var list []ID
+ for {
+ n, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ list = append(list, n)
+
+ if p.eat(",") {
+ continue
+ }
+ break
+ }
+ return list, nil
+}
+
// parseCreateChangeStream parses a CREATE CHANGE STREAM statement with its
// optional FOR clause and optional OPTIONS clause.
func (p *parser) parseCreateChangeStream() (*CreateChangeStream, *parseError) {
	debugf("parseCreateChangeStream: %v", p)

	/*
		CREATE CHANGE STREAM change_stream_name
			[FOR column_or_table_watching_definition[, ... ] ]
			[
			OPTIONS (
				retention_period = timespan,
				value_capture_type = type
			)
			]
	*/
	if err := p.expect("CREATE"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("CHANGE"); err != nil {
		return nil, err
	}
	if err := p.expect("STREAM"); err != nil {
		return nil, err
	}
	csname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}

	cs := &CreateChangeStream{Name: csname, Position: pos}

	// Optional FOR clause: either FOR ALL or a list of watch definitions.
	if p.sniff("FOR") {
		watch, watchAllTables, err := p.parseChangeStreamWatches()
		if err != nil {
			return nil, err
		}
		cs.Watch = watch
		cs.WatchAllTables = watchAllTables
	}

	if p.sniff("OPTIONS") {
		cs.Options, err = p.parseChangeStreamOptions()
		if err != nil {
			return nil, err
		}
	}

	return cs, nil
}
+
+func (p *parser) parseAlterChangeStream() (*AlterChangeStream, *parseError) {
+ debugf("parseAlterChangeStream: %v", p)
+
+ if err := p.expect("ALTER"); err != nil {
+ return nil, err
+ }
+ pos := p.Pos()
+ if err := p.expect("CHANGE"); err != nil {
+ return nil, err
+ }
+ if err := p.expect("STREAM"); err != nil {
+ return nil, err
+ }
+ csname, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+
+ acs := &AlterChangeStream{Name: csname, Position: pos}
+
+ tok := p.next()
+ if tok.err != nil {
+ return nil, tok.err
+ }
+ switch {
+ default:
+ return nil, p.errorf("got %q, expected SET or DROP", tok.value)
+ case tok.caseEqual("SET"):
+ if p.sniff("OPTIONS") {
+ options, err := p.parseChangeStreamOptions()
+ if err != nil {
+ return nil, err
+ }
+ acs.Alteration = AlterChangeStreamOptions{Options: options}
+ return acs, nil
+ }
+ if p.sniff("FOR") {
+ watch, watchAllTables, err := p.parseChangeStreamWatches()
+ if err != nil {
+ return nil, err
+ }
+ acs.Alteration = AlterWatch{Watch: watch, WatchAllTables: watchAllTables}
+ return acs, nil
+ }
+ return nil, p.errorf("got %q, expected FOR or OPTIONS", p.next())
+ case tok.caseEqual("DROP"):
+ if err := p.expect("FOR", "ALL"); err != nil {
+ return nil, err
+ }
+ acs.Alteration = DropChangeStreamWatch{}
+ return acs, nil
+ }
+}
+
// parseChangeStreamWatches parses the FOR clause of a change stream
// definition. It returns the list of watch definitions, or a true second
// result when the clause is "FOR ALL" (watch every table).
func (p *parser) parseChangeStreamWatches() ([]WatchDef, bool, *parseError) {
	debugf("parseChangeStreamWatches: %v", p)

	if err := p.expect("FOR"); err != nil {
		return nil, false, err
	}

	if p.eat("ALL") {
		return nil, true, nil
	}

	watchDefs := []WatchDef{}
	for {
		tname, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, false, err
		}
		pos := p.Pos()
		wd := WatchDef{Table: tname, Position: pos}

		// A parenthesized column list restricts the watch to those columns;
		// its absence means all columns are watched.
		if p.sniff("(") {
			columns, err := p.parseColumnNameList()
			if err != nil {
				return nil, false, err
			}
			wd.Columns = columns
		} else {
			wd.WatchAllCols = true
		}

		watchDefs = append(watchDefs, wd)
		if p.eat(",") {
			continue
		}
		break
	}

	return watchDefs, false, nil
}
+
// parseChangeStreamOptions parses the OPTIONS clause of a change stream
// statement. retention_period accepts null (reset, stored as empty string);
// value_capture_type must be a string.
func (p *parser) parseChangeStreamOptions() (ChangeStreamOptions, *parseError) {
	debugf("parseChangeStreamOptions: %v", p)
	/*
		options_def:
			OPTIONS (
				retention_period = timespan,
				value_capture_type = type
			) */

	if err := p.expect("OPTIONS"); err != nil {
		return ChangeStreamOptions{}, err
	}
	if err := p.expect("("); err != nil {
		return ChangeStreamOptions{}, err
	}

	var cso ChangeStreamOptions
	for {
		if p.eat("retention_period", "=") {
			tok := p.next()
			if tok.err != nil {
				return ChangeStreamOptions{}, tok.err
			}
			retentionPeriod := new(string)
			if tok.value == "null" {
				*retentionPeriod = ""
			} else {
				if tok.typ != stringToken {
					return ChangeStreamOptions{}, p.errorf("invalid retention_period: %v", tok.value)
				}
				*retentionPeriod = tok.string
			}
			cso.RetentionPeriod = retentionPeriod
		} else if p.eat("value_capture_type", "=") {
			tok := p.next()
			if tok.err != nil {
				return ChangeStreamOptions{}, tok.err
			}
			valueCaptureType := new(string)
			if tok.typ != stringToken {
				return ChangeStreamOptions{}, p.errorf("invalid value_capture_type: %v", tok.value)
			}
			*valueCaptureType = tok.string
			cso.ValueCaptureType = valueCaptureType
		} else {
			tok := p.next()
			return ChangeStreamOptions{}, p.errorf("unknown change stream option: %v", tok.value)
		}
		// Loop until the closing paren; options are comma-separated.
		if p.sniff(")") {
			break
		}
		if !p.eat(",") {
			return ChangeStreamOptions{}, p.errorf("missing ',' in options list")
		}
	}

	if err := p.expect(")"); err != nil {
		return ChangeStreamOptions{}, err
	}

	return cso, nil
}
+
// parseAlterStatistics parses an ALTER STATISTICS statement. The only
// supported alteration is SET OPTIONS (...).
func (p *parser) parseAlterStatistics() (*AlterStatistics, *parseError) {
	debugf("parseAlterStatistics: %v", p)

	/*
		ALTER STATISTICS package_name
			action

		where package_name is:
			{a—z}[{a—z|0—9|_|-}+]{a—z|0—9}

		and action is:
			SET OPTIONS ( options_def )

		and options_def is:
			{ allow_gc = { true | false } }
	*/

	if err := p.expect("ALTER"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("STATISTICS"); err != nil {
		return nil, err
	}
	// This is not 100% correct as package_name identifiers have slightly more
	// restrictions than table names, but the restrictions are currently not
	// applied in the spansql parser.
	// TODO: Apply restrictions for all identifiers.
	dbname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	a := &AlterStatistics{Name: dbname, Position: pos}

	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}
	switch {
	default:
		return nil, p.errorf("got %q, expected SET", tok.value)
	case tok.caseEqual("SET"):
		options, err := p.parseStatisticsOptions()
		if err != nil {
			return nil, err
		}
		a.Alteration = SetStatisticsOptions{Options: options}
		return a, nil
	}
}
+
+func (p *parser) parseStatisticsOptions() (StatisticsOptions, *parseError) {
+ debugf("parseDatabaseOptions: %v", p)
+ /*
+ options_def is:
+ { allow_gc = { true | false } }
+ */
+
+ if err := p.expect("OPTIONS"); err != nil {
+ return StatisticsOptions{}, err
+ }
+ if err := p.expect("("); err != nil {
+ return StatisticsOptions{}, err
+ }
+
+ // We ignore case for the key (because it is easier) but not the value.
+ var opts StatisticsOptions
+ for {
+ if p.eat("allow_gc", "=") {
+ tok := p.next()
+ if tok.err != nil {
+ return StatisticsOptions{}, tok.err
+ }
+ allowGC := new(bool)
+ switch tok.value {
+ case "true":
+ *allowGC = true
+ case "false":
+ *allowGC = false
+ default:
+ return StatisticsOptions{}, p.errorf("invalid allow_gc: %v", tok.value)
+ }
+ opts.AllowGC = allowGC
+ } else {
+ tok := p.next()
+ return StatisticsOptions{}, p.errorf("unknown statistics option: %v", tok.value)
+ }
+ if p.sniff(")") {
+ break
+ }
+ if !p.eat(",") {
+ return StatisticsOptions{}, p.errorf("missing ',' in options list")
+ }
+ }
+ if err := p.expect(")"); err != nil {
+ return StatisticsOptions{}, err
+ }
+
+ return opts, nil
+}
+
+func (p *parser) parseAlterIndex() (*AlterIndex, *parseError) {
+ debugf("parseAlterIndex: %v", p)
+
+ if err := p.expect("ALTER"); err != nil {
+ return nil, err
+ }
+ pos := p.Pos()
+ if err := p.expect("INDEX"); err != nil {
+ return nil, err
+ }
+ iname, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+
+ a := &AlterIndex{Name: iname, Position: pos}
+ tok := p.next()
+ if tok.err != nil {
+ return nil, tok.err
+ }
+ switch {
+ case tok.caseEqual("ADD"):
+ if err := p.expect("STORED", "COLUMN"); err != nil {
+ return nil, err
+ }
+ cname, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = AddStoredColumn{Name: cname}
+ return a, nil
+ case tok.caseEqual("DROP"):
+ if err := p.expect("STORED", "COLUMN"); err != nil {
+ return nil, err
+ }
+ cname, err := p.parseTableOrIndexOrColumnName()
+ if err != nil {
+ return nil, err
+ }
+ a.Alteration = DropStoredColumn{Name: cname}
+ return a, nil
+ }
+
+ return nil, p.errorf("got %q, expected ADD or DROP", tok.value)
+}
+
// parseCreateSequence parses a CREATE SEQUENCE statement, including the
// optional IF NOT EXISTS modifier and OPTIONS clause.
func (p *parser) parseCreateSequence() (*CreateSequence, *parseError) {
	debugf("parseCreateSequence: %v", p)

	/*
		CREATE SEQUENCE
			[ IF NOT EXISTS ] sequence_name
			[ OPTIONS ( sequence_options ) ]
	*/

	if err := p.expect("CREATE"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("SEQUENCE"); err != nil {
		return nil, err
	}
	var ifNotExists bool
	if p.eat("IF", "NOT", "EXISTS") {
		ifNotExists = true
	}
	sname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}

	cs := &CreateSequence{Name: sname, IfNotExists: ifNotExists, Position: pos}

	if p.sniff("OPTIONS") {
		cs.Options, err = p.parseSequenceOptions()
		if err != nil {
			return nil, err
		}
	}

	return cs, nil
}
+
// parseAlterSequence parses an ALTER SEQUENCE statement. The only supported
// alteration is SET OPTIONS (...).
func (p *parser) parseAlterSequence() (*AlterSequence, *parseError) {
	debugf("parseAlterSequence: %v", p)

	/*
		ALTER SEQUENCE sequence_name
		SET OPTIONS sequence_options
	*/

	if err := p.expect("ALTER"); err != nil {
		return nil, err
	}
	pos := p.Pos()
	if err := p.expect("SEQUENCE"); err != nil {
		return nil, err
	}
	sname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}

	as := &AlterSequence{Name: sname, Position: pos}

	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}
	switch {
	default:
		return nil, p.errorf("got %q, expected SET", tok.value)
	case tok.caseEqual("SET"):
		options, err := p.parseSequenceOptions()
		if err != nil {
			return nil, err
		}
		as.Alteration = SetSequenceOptions{Options: options}
		return as, nil
	}
}
+
+func (p *parser) parseSequenceOptions() (SequenceOptions, *parseError) {
+ debugf("parseSequenceOptions: %v", p)
+
+ if err := p.expect("OPTIONS", "("); err != nil {
+ return SequenceOptions{}, err
+ }
+
+ // We ignore case for the key (because it is easier) but not the value.
+ var so SequenceOptions
+ for {
+ if p.eat("sequence_kind", "=") {
+ tok := p.next()
+ if tok.err != nil {
+ return SequenceOptions{}, tok.err
+ }
+ if tok.typ != stringToken {
+ return SequenceOptions{}, p.errorf("invalid sequence_kind value: %v", tok.value)
+ }
+ sequenceKind := tok.string
+ so.SequenceKind = &sequenceKind
+ } else if p.eat("skip_range_min", "=") {
+ tok := p.next()
+ if tok.err != nil {
+ return SequenceOptions{}, tok.err
+ }
+ if tok.typ != int64Token {
+ return SequenceOptions{}, p.errorf("invalid skip_range_min value: %v", tok.value)
+ }
+ value, err := strconv.Atoi(tok.value)
+ if err != nil {
+ return SequenceOptions{}, p.errorf("invalid skip_range_min value: %v", tok.value)
+ }
+ so.SkipRangeMin = &value
+ } else if p.eat("skip_range_max", "=") {
+ tok := p.next()
+ if tok.err != nil {
+ return SequenceOptions{}, tok.err
+ }
+ if tok.typ != int64Token {
+ return SequenceOptions{}, p.errorf("invalid skip_range_max value: %v", tok.value)
+ }
+ value, err := strconv.Atoi(tok.value)
+ if err != nil {
+ return SequenceOptions{}, p.errorf("invalid skip_range_max value: %v", tok.value)
+ }
+ so.SkipRangeMax = &value
+ } else if p.eat("start_with_counter", "=") {
+ tok := p.next()
+ if tok.err != nil {
+ return SequenceOptions{}, tok.err
+ }
+ if tok.typ != int64Token {
+ return SequenceOptions{}, p.errorf("invalid start_with_counter value: %v", tok.value)
+ }
+ value, err := strconv.Atoi(tok.value)
+ if err != nil {
+ return SequenceOptions{}, p.errorf("invalid start_with_counter value: %v", tok.value)
+ }
+ so.StartWithCounter = &value
+ } else {
+ tok := p.next()
+ return SequenceOptions{}, p.errorf("unknown sequence option: %v", tok.value)
+ }
+ if p.sniff(")") {
+ break
+ }
+ if !p.eat(",") {
+ return SequenceOptions{}, p.errorf("missing ',' in options list")
+ }
+ }
+ if err := p.expect(")"); err != nil {
+ return SequenceOptions{}, err
+ }
+
+ return so, nil
+}
+
// baseTypes maps the upper-case SQL name of each scalar type to its
// TypeBase value; lookups must upper-case the token first.
var baseTypes = map[string]TypeBase{
	"BOOL":      Bool,
	"INT64":     Int64,
	"FLOAT64":   Float64,
	"NUMERIC":   Numeric,
	"STRING":    String,
	"BYTES":     Bytes,
	"DATE":      Date,
	"TIMESTAMP": Timestamp,
	"JSON":      JSON,
}
+
// parseBaseType parses a scalar or array type without permitting a length
// parameter (e.g. no STRING(MAX)).
func (p *parser) parseBaseType() (Type, *parseError) {
	return p.parseBaseOrParameterizedType(false)
}
+
// parseType parses a scalar or array type, requiring a length parameter
// for STRING and BYTES.
func (p *parser) parseType() (Type, *parseError) {
	return p.parseBaseOrParameterizedType(true)
}
+
// extractPartTypes maps the upper-case part name of an EXTRACT expression
// to the type of the extracted value.
var extractPartTypes = map[string]TypeBase{
	"DAY":   Int64,
	"MONTH": Int64,
	"YEAR":  Int64,
	"DATE":  Date,
}
+
+func (p *parser) parseExtractType() (Type, string, *parseError) {
+ var t Type
+ tok := p.next()
+ if tok.err != nil {
+ return Type{}, "", tok.err
+ }
+ base, ok := extractPartTypes[strings.ToUpper(tok.value)] // valid part types for EXTRACT is keyed by upper case strings.
+ if !ok {
+ return Type{}, "", p.errorf("got %q, want valid EXTRACT types", tok.value)
+ }
+ t.Base = base
+ return t, strings.ToUpper(tok.value), nil
+}
+
// parseBaseOrParameterizedType parses a scalar or ARRAY<scalar> type.
// When withParam is true, STRING and BYTES must carry a parenthesized
// length parameter (an integer or MAX).
func (p *parser) parseBaseOrParameterizedType(withParam bool) (Type, *parseError) {
	debugf("parseBaseOrParameterizedType: %v", p)

	/*
		array_type:
			ARRAY< scalar_type >

		scalar_type:
			{ BOOL | INT64 | FLOAT64 | NUMERIC | STRING( length ) | BYTES( length ) | DATE | TIMESTAMP | JSON }
		length:
			{ int64_value | MAX }
	*/

	var t Type

	tok := p.next()
	if tok.err != nil {
		return Type{}, tok.err
	}
	if tok.caseEqual("ARRAY") {
		// ARRAY< ... >: note the element type and fall through to parse it.
		t.Array = true
		if err := p.expect("<"); err != nil {
			return Type{}, err
		}
		tok = p.next()
		if tok.err != nil {
			return Type{}, tok.err
		}
	}
	base, ok := baseTypes[strings.ToUpper(tok.value)] // baseTypes is keyed by upper case strings.
	if !ok {
		return Type{}, p.errorf("got %q, want scalar type", tok.value)
	}
	t.Base = base

	if withParam && (t.Base == String || t.Base == Bytes) {
		// Mandatory length parameter: ( int64 | MAX ).
		if err := p.expect("("); err != nil {
			return Type{}, err
		}

		tok = p.next()
		if tok.err != nil {
			return Type{}, tok.err
		}
		if tok.caseEqual("MAX") {
			t.Len = MaxLen
		} else if tok.typ == int64Token {
			n, err := strconv.ParseInt(tok.value, tok.int64Base, 64)
			if err != nil {
				return Type{}, p.errorf("%v", err)
			}
			t.Len = n
		} else {
			return Type{}, p.errorf("got %q, want MAX or int64", tok.value)
		}

		if err := p.expect(")"); err != nil {
			return Type{}, err
		}
	}

	if t.Array {
		// Close the ARRAY< ... > bracket.
		if err := p.expect(">"); err != nil {
			return Type{}, err
		}
	}

	return t, nil
}
+
// parseQuery parses a query statement: a SELECT with optional ORDER BY and
// LIMIT/OFFSET clauses.
func (p *parser) parseQuery() (Query, *parseError) {
	debugf("parseQuery: %v", p)

	/*
		query_statement:
			[ table_hint_expr ][ join_hint_expr ]
			query_expr

		query_expr:
			{ select | ( query_expr ) | query_expr set_op query_expr }
			[ ORDER BY expression [{ ASC | DESC }] [, ...] ]
			[ LIMIT count [ OFFSET skip_rows ] ]
	*/

	// TODO: sub-selects, etc.

	// Peek for SELECT, then put it back so parseSelect sees the full clause.
	if err := p.expect("SELECT"); err != nil {
		return Query{}, err
	}
	p.back()
	sel, err := p.parseSelect()
	if err != nil {
		return Query{}, err
	}
	q := Query{Select: sel}

	if p.eat("ORDER", "BY") {
		for {
			o, err := p.parseOrder()
			if err != nil {
				return Query{}, err
			}
			q.Order = append(q.Order, o)

			if !p.eat(",") {
				break
			}
		}
	}

	if p.eat("LIMIT") {
		// "only literal or parameter values"
		// https://cloud.google.com/spanner/docs/query-syntax#limit-clause-and-offset-clause

		lim, err := p.parseLiteralOrParam()
		if err != nil {
			return Query{}, err
		}
		q.Limit = lim

		// OFFSET is only valid after LIMIT.
		if p.eat("OFFSET") {
			off, err := p.parseLiteralOrParam()
			if err != nil {
				return Query{}, err
			}
			q.Offset = off
		}
	}

	return q, nil
}
+
// parseSelect parses a SELECT clause: the optional ALL/DISTINCT modifier,
// the select list, and the optional FROM, WHERE and GROUP BY clauses.
func (p *parser) parseSelect() (Select, *parseError) {
	debugf("parseSelect: %v", p)

	/*
		select:
			SELECT [{ ALL | DISTINCT }]
				{ [ expression. ]* | expression [ [ AS ] alias ] } [, ...]
			[ FROM from_item [ tablesample_type ] [, ...] ]
			[ WHERE bool_expression ]
			[ GROUP BY expression [, ...] ]
			[ HAVING bool_expression ]
	*/
	if err := p.expect("SELECT"); err != nil {
		return Select{}, err
	}

	var sel Select

	if p.eat("ALL") {
		// Nothing to do; this is the default.
	} else if p.eat("DISTINCT") {
		sel.Distinct = true
	}

	// Read expressions for the SELECT list.
	list, aliases, err := p.parseSelectList()
	if err != nil {
		return Select{}, err
	}
	sel.List, sel.ListAliases = list, aliases

	if p.eat("FROM") {
		// TableSamples is kept nil unless at least one TABLESAMPLE is seen;
		// padTS extends it with nils so its indexes line up with From.
		padTS := func() {
			for len(sel.TableSamples) < len(sel.From) {
				sel.TableSamples = append(sel.TableSamples, nil)
			}
		}

		for {
			from, err := p.parseSelectFrom()
			if err != nil {
				return Select{}, err
			}
			sel.From = append(sel.From, from)

			if p.sniff("TABLESAMPLE") {
				ts, err := p.parseTableSample()
				if err != nil {
					return Select{}, err
				}
				padTS()
				sel.TableSamples[len(sel.TableSamples)-1] = &ts
			}

			if p.eat(",") {
				continue
			}
			break
		}

		// If any sample was recorded, pad to full length so the two
		// slices stay parallel.
		if sel.TableSamples != nil {
			padTS()
		}
	}

	if p.eat("WHERE") {
		where, err := p.parseBoolExpr()
		if err != nil {
			return Select{}, err
		}
		sel.Where = where
	}

	if p.eat("GROUP", "BY") {
		list, err := p.parseExprList()
		if err != nil {
			return Select{}, err
		}
		sel.GroupBy = list
	}

	// TODO: HAVING

	return sel, nil
}
+
// parseSelectList parses the comma-separated SELECT list, returning the
// expressions and a parallel alias slice. The alias slice is nil when no
// alias appears anywhere in the list; otherwise it is padded with "" so it
// is the same length as the expression list.
func (p *parser) parseSelectList() ([]Expr, []ID, *parseError) {
	var list []Expr
	var aliases []ID // Only set if any aliases are seen.
	padAliases := func() {
		for len(aliases) < len(list) {
			aliases = append(aliases, "")
		}
	}

	for {
		expr, err := p.parseExpr()
		if err != nil {
			return nil, nil, err
		}
		list = append(list, expr)

		// TODO: The "AS" keyword is optional.
		if p.eat("AS") {
			alias, err := p.parseAlias()
			if err != nil {
				return nil, nil, err
			}

			padAliases()
			aliases[len(aliases)-1] = alias
		}

		if p.eat(",") {
			continue
		}
		break
	}
	if aliases != nil {
		padAliases()
	}
	return list, aliases, nil
}
+
// parseSelectFromTable parses a non-join from_item: either an
// UNNEST(expr) [AS alias], or a table name with optional @{hints} and alias.
func (p *parser) parseSelectFromTable() (SelectFrom, *parseError) {
	if p.eat("UNNEST") {
		if err := p.expect("("); err != nil {
			return nil, err
		}
		e, err := p.parseExpr()
		if err != nil {
			return nil, err
		}
		if err := p.expect(")"); err != nil {
			return nil, err
		}
		sfu := SelectFromUnnest{Expr: e}
		if p.eat("AS") { // TODO: The "AS" keyword is optional.
			alias, err := p.parseAlias()
			if err != nil {
				return nil, err
			}
			sfu.Alias = alias
		}
		// TODO: hint, offset
		return sfu, nil
	}

	// A join starts with a from_item, so that can't be detected in advance.
	// TODO: Support subquery, field_path, array_path, WITH.
	// TODO: Verify associativity of multiple joins.

	tname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return nil, err
	}
	sf := SelectFromTable{Table: tname}
	// Optional table hints: @{k=v, ...}.
	if p.eat("@") {
		hints, err := p.parseHints(map[string]string{})
		if err != nil {
			return nil, err
		}
		sf.Hints = hints
	}

	// TODO: The "AS" keyword is optional.
	if p.eat("AS") {
		alias, err := p.parseAlias()
		if err != nil {
			return nil, err
		}
		sf.Alias = alias
	}
	return sf, nil
}
+
// parseSelectFromJoin parses an optional join continuation whose left-hand
// side has already been parsed. It returns (nil, nil) — with no tokens
// consumed — when the next tokens do not start a join.
func (p *parser) parseSelectFromJoin(lhs SelectFrom) (SelectFrom, *parseError) {
	// Look ahead to see if this is a join.
	tok := p.next()
	if tok.err != nil {
		p.back()
		return nil, nil
	}
	var hashJoin bool // Special case for "HASH JOIN" syntax.
	if tok.caseEqual("HASH") {
		hashJoin = true
		tok = p.next()
		if tok.err != nil {
			return nil, tok.err
		}
	}
	var jt JoinType
	if tok.caseEqual("JOIN") {
		// This is implicitly an inner join.
		jt = InnerJoin
	} else if j, ok := joinKeywords[tok.value]; ok {
		jt = j
		switch jt {
		case FullJoin, LeftJoin, RightJoin:
			// These join types are implicitly "outer" joins,
			// so the "OUTER" keyword is optional.
			p.eat("OUTER")
		}
		if err := p.expect("JOIN"); err != nil {
			return nil, err
		}
	} else {
		// Not a join; un-consume the lookahead token.
		p.back()
		return nil, nil
	}
	sfj := SelectFromJoin{
		Type: jt,
		LHS:  lhs,
	}
	var hints map[string]string
	if hashJoin {
		// HASH JOIN is recorded as the JOIN_METHOD hint.
		hints = map[string]string{}
		hints["JOIN_METHOD"] = "HASH_JOIN"
	}

	// Optional join hints: @{k=v, ...}, merged with any HASH JOIN hint.
	if p.eat("@") {
		h, err := p.parseHints(hints)
		if err != nil {
			return nil, err
		}
		hints = h
	}
	sfj.Hints = hints

	rhs, err := p.parseSelectFromTable()
	if err != nil {
		return nil, err
	}

	sfj.RHS = rhs

	if p.eat("ON") {
		sfj.On, err = p.parseBoolExpr()
		if err != nil {
			return nil, err
		}
	}
	if p.eat("USING") {
		// ON and USING are mutually exclusive.
		if sfj.On != nil {
			return nil, p.errorf("join may not have both ON and USING clauses")
		}
		sfj.Using, err = p.parseColumnNameList()
		if err != nil {
			return nil, err
		}
	}

	return sfj, nil
}
+
// parseSelectFrom parses one from_item, folding any trailing joins into a
// left-nested SelectFromJoin chain (joins are left associative).
func (p *parser) parseSelectFrom() (SelectFrom, *parseError) {
	debugf("parseSelectFrom: %v", p)

	/*
		from_item: {
			table_name [ table_hint_expr ] [ [ AS ] alias ] |
			join |
			( query_expr ) [ table_hint_expr ] [ [ AS ] alias ] |
			field_path |
			{ UNNEST( array_expression ) | UNNEST( array_path ) | array_path }
				[ table_hint_expr ] [ [ AS ] alias ] [ WITH OFFSET [ [ AS ] alias ] ] |
			with_query_name [ table_hint_expr ] [ [ AS ] alias ]
		}

		join:
			from_item [ join_type ] [ join_method ] JOIN  [ join_hint_expr ] from_item
				[ ON bool_expression | USING ( join_column [, ...] ) ]

		join_type:
			{ INNER | CROSS | FULL [OUTER] | LEFT [OUTER] | RIGHT [OUTER] }
	*/
	leftHandSide, err := p.parseSelectFromTable()
	if err != nil {
		return nil, err
	}
	// Lets keep consuming joins until we no longer find more joins
	for {
		sfj, err := p.parseSelectFromJoin(leftHandSide)
		if err != nil {
			return nil, err
		}
		if sfj == nil {
			// There was no join to consume
			break
		}
		// Each consumed join becomes the LHS of the next.
		leftHandSide = sfj
	}
	return leftHandSide, nil
}
+
// joinKeywords maps the leading join-type keyword to its JoinType.
// "JOIN" with no leading keyword is handled separately as an inner join.
var joinKeywords = map[string]JoinType{
	"INNER": InnerJoin,
	"CROSS": CrossJoin,
	"FULL":  FullJoin,
	"LEFT":  LeftJoin,
	"RIGHT": RightJoin,
}
+
// parseTableSample parses a TABLESAMPLE clause:
// TABLESAMPLE { BERNOULLI | RESERVOIR } ( size { PERCENT | ROWS } ).
func (p *parser) parseTableSample() (TableSample, *parseError) {
	var ts TableSample

	if err := p.expect("TABLESAMPLE"); err != nil {
		return ts, err
	}

	tok := p.next()
	switch {
	case tok.err != nil:
		return ts, tok.err
	case tok.caseEqual("BERNOULLI"):
		ts.Method = Bernoulli
	case tok.caseEqual("RESERVOIR"):
		ts.Method = Reservoir
	default:
		return ts, p.errorf("got %q, want BERNOULLI or RESERVOIR", tok.value)
	}

	if err := p.expect("("); err != nil {
		return ts, err
	}

	// The docs say "numeric_value_expression" here,
	// but that doesn't appear to be defined anywhere.
	size, err := p.parseExpr()
	if err != nil {
		return ts, err
	}
	ts.Size = size

	tok = p.next()
	switch {
	case tok.err != nil:
		return ts, tok.err
	case tok.caseEqual("PERCENT"):
		ts.SizeType = PercentTableSample
	case tok.caseEqual("ROWS"):
		ts.SizeType = RowsTableSample
	default:
		return ts, p.errorf("got %q, want PERCENT or ROWS", tok.value)
	}

	if err := p.expect(")"); err != nil {
		return ts, err
	}

	return ts, nil
}
+
+func (p *parser) parseOrder() (Order, *parseError) {
+ /*
+ expression [{ ASC | DESC }]
+ */
+
+ expr, err := p.parseExpr()
+ if err != nil {
+ return Order{}, err
+ }
+ o := Order{Expr: expr}
+
+ if p.eat("ASC") {
+ // OK.
+ } else if p.eat("DESC") {
+ o.Desc = true
+ }
+
+ return o, nil
+}
+
// parseLiteralOrParam parses either an int64 literal or a @parameter,
// the only values permitted in LIMIT/OFFSET clauses.
func (p *parser) parseLiteralOrParam() (LiteralOrParam, *parseError) {
	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}
	if tok.typ == int64Token {
		n, err := strconv.ParseInt(tok.value, tok.int64Base, 64)
		if err != nil {
			return nil, p.errorf("%v", err)
		}
		return IntegerLiteral(n), nil
	}
	// TODO: check character sets.
	// A leading "@" marks a query parameter; strip it for the Param value.
	if strings.HasPrefix(tok.value, "@") {
		return Param(tok.value[1:]), nil
	}
	return nil, p.errorf("got %q, want literal or parameter", tok.value)
}
+
+func (p *parser) parseExprList() ([]Expr, *parseError) {
+ var list []Expr
+ for {
+ expr, err := p.parseExpr()
+ if err != nil {
+ return nil, err
+ }
+ list = append(list, expr)
+
+ if p.eat(",") {
+ continue
+ }
+ break
+ }
+ return list, nil
+}
+
+func (p *parser) parseParenExprList() ([]Expr, *parseError) {
+ return p.parseParenExprListWithParseFunc(func(p *parser) (Expr, *parseError) {
+ return p.parseExpr()
+ })
+}
+
// parseParenExprListWithParseFunc parses a parenthesized, comma-separated
// list whose elements are parsed by f. It is the shared engine behind
// parseParenExprList and the special per-function argument parsers.
func (p *parser) parseParenExprListWithParseFunc(f func(*parser) (Expr, *parseError)) ([]Expr, *parseError) {
	var list []Expr
	err := p.parseCommaList("(", ")", func(p *parser) *parseError {
		e, err := f(p)
		if err != nil {
			return err
		}
		list = append(list, e)
		return nil
	})
	return list, err
}
+
// Special argument parser for CAST and SAFE_CAST:
// parses "expr AS type" into a TypedExpr.
var typedArgParser = func(p *parser) (Expr, *parseError) {
	e, err := p.parseExpr()
	if err != nil {
		return nil, err
	}
	if err := p.expect("AS"); err != nil {
		return nil, err
	}
	// typename in cast function must not be parameterized types
	toType, err := p.parseBaseType()
	if err != nil {
		return nil, err
	}
	return TypedExpr{
		Expr: e,
		Type: toType,
	}, nil
}
+
+// Special argument parser for EXTRACT
+var extractArgParser = func(p *parser) (Expr, *parseError) {
+ partType, part, err := p.parseExtractType()
+ if err != nil {
+ return nil, err
+ }
+ if err := p.expect("FROM"); err != nil {
+ return nil, err
+ }
+ e, err := p.parseExpr()
+ if err != nil {
+ return nil, err
+ }
+ // AT TIME ZONE is optional
+ if p.eat("AT", "TIME", "ZONE") {
+ tok := p.next()
+ if tok.err != nil {
+ return nil, err
+ }
+ return ExtractExpr{Part: part, Type: partType, Expr: AtTimeZoneExpr{Expr: e, Zone: tok.string, Type: Type{Base: Timestamp}}}, nil
+ }
+ return ExtractExpr{
+ Part: part,
+ Expr: e,
+ Type: partType,
+ }, nil
+}
+
// intervalArgParser builds an argument parser that accepts either a plain
// expression or "INTERVAL expr date_part", with the valid date parts
// determined by the supplied parseDatePart function.
var intervalArgParser = func(parseDatePart func(*parser) (string, *parseError)) func(*parser) (Expr, *parseError) {
	return func(p *parser) (Expr, *parseError) {
		if p.eat("INTERVAL") {
			expr, err := p.parseExpr()
			if err != nil {
				return nil, err
			}
			datePart, err := parseDatePart(p)
			if err != nil {
				return nil, err
			}
			return IntervalExpr{Expr: expr, DatePart: datePart}, nil
		}
		// No INTERVAL keyword: an ordinary expression argument.
		return p.parseExpr()
	}
}
+
// dateIntervalDateParts is the set of date parts (upper-cased) valid in
// DATE_ADD/DATE_SUB intervals.
var dateIntervalDateParts map[string]bool = map[string]bool{
	"DAY":     true,
	"WEEK":    true,
	"MONTH":   true,
	"QUARTER": true,
	"YEAR":    true,
}
+
+func (p *parser) parseDateIntervalDatePart() (string, *parseError) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+ if dateIntervalDateParts[strings.ToUpper(tok.value)] {
+ return strings.ToUpper(tok.value), nil
+ }
+ return "", p.errorf("got %q, want valid date part names", tok.value)
+}
+
// timestampIntervalDateParts is the set of date parts (upper-cased) valid in
// TIMESTAMP_ADD/TIMESTAMP_SUB intervals.
var timestampIntervalDateParts map[string]bool = map[string]bool{
	"NANOSECOND":  true,
	"MICROSECOND": true,
	"MILLISECOND": true,
	"SECOND":      true,
	"MINUTE":      true,
	"HOUR":        true,
	"DAY":         true,
}
+
+func (p *parser) parseTimestampIntervalDatePart() (string, *parseError) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+ if timestampIntervalDateParts[strings.ToUpper(tok.value)] {
+ return strings.ToUpper(tok.value), nil
+ }
+ return "", p.errorf("got %q, want valid date part names", tok.value)
+}
+
// Special argument parser for DATE_ADD, DATE_SUB:
// accepts INTERVAL expressions with date-granularity parts.
var dateIntervalArgParser = intervalArgParser((*parser).parseDateIntervalDatePart)

// Special argument parser for TIMESTAMP_ADD, TIMESTAMP_SUB:
// accepts INTERVAL expressions with sub-day through DAY parts.
var timestampIntervalArgParser = intervalArgParser((*parser).parseTimestampIntervalDatePart)
+
// sequenceArgParser parses arguments that may be "SEQUENCE name"
// (e.g. for GET_NEXT_SEQUENCE_VALUE) or an ordinary expression.
var sequenceArgParser = func(p *parser) (Expr, *parseError) {
	if p.eat("SEQUENCE") {
		name, err := p.parseTableOrIndexOrColumnName()
		if err != nil {
			return nil, err
		}
		return SequenceExpr{Name: name}, nil
	}
	return p.parseExpr()
}
+
+func (p *parser) parseAggregateFunc() (Func, *parseError) {
+ tok := p.next()
+ if tok.err != nil {
+ return Func{}, tok.err
+ }
+ name := strings.ToUpper(tok.value)
+ if err := p.expect("("); err != nil {
+ return Func{}, err
+ }
+ var distinct bool
+ if p.eat("DISTINCT") {
+ distinct = true
+ }
+ args, err := p.parseExprList()
+ if err != nil {
+ return Func{}, err
+ }
+ var nullsHandling NullsHandling
+ if p.eat("IGNORE", "NULLS") {
+ nullsHandling = IgnoreNulls
+ } else if p.eat("RESPECT", "NULLS") {
+ nullsHandling = RespectNulls
+ }
+ var having *AggregateHaving
+ if p.eat("HAVING") {
+ tok := p.next()
+ if tok.err != nil {
+ return Func{}, tok.err
+ }
+ var cond AggregateHavingCondition
+ switch tok.value {
+ case "MAX":
+ cond = HavingMax
+ case "MIN":
+ cond = HavingMin
+ default:
+ return Func{}, p.errorf("got %q, want MAX or MIN", tok.value)
+ }
+ expr, err := p.parseExpr()
+ if err != nil {
+ return Func{}, err
+ }
+ having = &AggregateHaving{
+ Condition: cond,
+ Expr: expr,
+ }
+ }
+ if err := p.expect(")"); err != nil {
+ return Func{}, err
+ }
+ return Func{
+ Name: name,
+ Args: args,
+ Distinct: distinct,
+ NullsHandling: nullsHandling,
+ Having: having,
+ }, nil
+}
+
+/*
+Expressions
+
+Cloud Spanner expressions are not formally specified.
+The set of operators and their precedence is listed in
+https://cloud.google.com/spanner/docs/functions-and-operators#operators.
+
+parseExpr works as a classical recursive descent parser, splitting
+precedence levels into separate methods, where the call stack is in
+ascending order of precedence:
+ parseExpr
+ orParser
+ andParser
+ parseIsOp
+ parseInOp
+ parseComparisonOp
+ parseArithOp: |, ^, &, << and >>, + and -, * and / and ||
+ parseUnaryArithOp: - and ~
+ parseLit
+*/
+
// parseExpr parses an expression at the lowest precedence level (OR),
// which recursively covers the whole operator hierarchy described above.
func (p *parser) parseExpr() (Expr, *parseError) {
	debugf("parseExpr: %v", p)

	return orParser.parse(p)
}
+
// binOpParser is a generic meta-parser for binary operations.
// It assumes the operation is left associative.
type binOpParser struct {
	// LHS and RHS parse the operands at the next-higher precedence level.
	LHS, RHS func(*parser) (Expr, *parseError)
	// Op is the operator token this parser consumes between operands.
	Op string
	// ArgCheck optionally validates each operand before combining.
	ArgCheck func(Expr) error
	// Combiner builds the resulting expression node from two operands.
	Combiner func(lhs, rhs Expr) Expr
}
+
+func (bin binOpParser) parse(p *parser) (Expr, *parseError) {
+ expr, err := bin.LHS(p)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ if !p.eat(bin.Op) {
+ break
+ }
+ rhs, err := bin.RHS(p)
+ if err != nil {
+ return nil, err
+ }
+ if bin.ArgCheck != nil {
+ if err := bin.ArgCheck(expr); err != nil {
+ return nil, p.errorf("%v", err)
+ }
+ if err := bin.ArgCheck(rhs); err != nil {
+ return nil, p.errorf("%v", err)
+ }
+ }
+ expr = bin.Combiner(expr, rhs)
+ }
+ return expr, nil
+}
+
// Break initialisation loop: orParser refers (via andParser) to parsers
// whose initializers would otherwise form a declaration cycle, so it is
// assigned at init time from orParserShim.
func init() { orParser = orParserShim }
+
var (
	// boolExprCheck rejects operands of OR/AND that are not boolean
	// expressions.
	boolExprCheck = func(expr Expr) error {
		if _, ok := expr.(BoolExpr); !ok {
			return fmt.Errorf("got %T, want a boolean expression", expr)
		}
		return nil
	}

	// orParser is assigned from orParserShim in init to break the
	// initialisation cycle.
	orParser binOpParser

	orParserShim = binOpParser{
		LHS:      andParser.parse,
		RHS:      andParser.parse,
		Op:       "OR",
		ArgCheck: boolExprCheck,
		Combiner: func(lhs, rhs Expr) Expr {
			return LogicalOp{LHS: lhs.(BoolExpr), Op: Or, RHS: rhs.(BoolExpr)}
		},
	}
	andParser = binOpParser{
		LHS:      (*parser).parseLogicalNot,
		RHS:      (*parser).parseLogicalNot,
		Op:       "AND",
		ArgCheck: boolExprCheck,
		Combiner: func(lhs, rhs Expr) Expr {
			return LogicalOp{LHS: lhs.(BoolExpr), Op: And, RHS: rhs.(BoolExpr)}
		},
	}

	// Arithmetic operator parsers, chained in ascending precedence:
	// | then ^ then & then >> then << then - then + then || then / then *,
	// bottoming out at unary arithmetic.
	bitOrParser  = newBinArithParser("|", BitOr, bitXorParser.parse)
	bitXorParser = newBinArithParser("^", BitXor, bitAndParser.parse)
	bitAndParser = newBinArithParser("&", BitAnd, bitShrParser.parse)
	bitShrParser = newBinArithParser(">>", BitShr, bitShlParser.parse)
	bitShlParser = newBinArithParser("<<", BitShl, subParser.parse)
	subParser    = newBinArithParser("-", Sub, addParser.parse)
	addParser    = newBinArithParser("+", Add, concatParser.parse)
	concatParser = newBinArithParser("||", Concat, divParser.parse)
	divParser    = newBinArithParser("/", Div, mulParser.parse)
	mulParser    = newBinArithParser("*", Mul, (*parser).parseUnaryArithOp)
)
+
// newBinArithParser builds a binOpParser for a left-associative arithmetic
// operator, delegating operand parsing to the next-higher precedence level.
func newBinArithParser(opStr string, op ArithOperator, nextPrec func(*parser) (Expr, *parseError)) binOpParser {
	return binOpParser{
		LHS: nextPrec,
		RHS: nextPrec,
		Op:  opStr,
		// TODO: ArgCheck? numeric inputs only, except for ||.
		Combiner: func(lhs, rhs Expr) Expr {
			return ArithOp{LHS: lhs, Op: op, RHS: rhs}
		},
	}
}
+
+func (p *parser) parseLogicalNot() (Expr, *parseError) {
+ if !p.eat("NOT") {
+ return p.parseIsOp()
+ }
+ be, err := p.parseBoolExpr()
+ if err != nil {
+ return nil, err
+ }
+ return LogicalOp{Op: Not, RHS: be}, nil
+}
+
// parseIsOp parses an optional trailing "IS [NOT] {NULL|TRUE|FALSE}" after
// an expression at the next precedence level (IN).
func (p *parser) parseIsOp() (Expr, *parseError) {
	debugf("parseIsOp: %v", p)

	expr, err := p.parseInOp()
	if err != nil {
		return nil, err
	}

	if !p.eat("IS") {
		return expr, nil
	}

	isOp := IsOp{LHS: expr}
	if p.eat("NOT") {
		isOp.Neg = true
	}

	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}
	switch {
	case tok.caseEqual("NULL"):
		isOp.RHS = Null
	case tok.caseEqual("TRUE"):
		isOp.RHS = True
	case tok.caseEqual("FALSE"):
		isOp.RHS = False
	default:
		return nil, p.errorf("got %q, want NULL or TRUE or FALSE", tok.value)
	}

	return isOp, nil
}
+
// parseInOp parses an optional trailing "[NOT] IN [UNNEST] (list)" after an
// expression at the next precedence level (comparison).
func (p *parser) parseInOp() (Expr, *parseError) {
	debugf("parseInOp: %v", p)

	expr, err := p.parseComparisonOp()
	if err != nil {
		return nil, err
	}

	inOp := InOp{LHS: expr}
	// "NOT IN" must be checked before the bare "NOT"-less "IN".
	if p.eat("NOT", "IN") {
		inOp.Neg = true
	} else if p.eat("IN") {
		// Okay.
	} else {
		return expr, nil
	}

	if p.eat("UNNEST") {
		inOp.Unnest = true
	}

	inOp.RHS, err = p.parseParenExprList()
	if err != nil {
		return nil, err
	}
	return inOp, nil
}
+
// symbolicOperators maps comparison operator tokens to their
// ComparisonOperator values. "!=" and "<>" are synonyms.
var symbolicOperators = map[string]ComparisonOperator{
	"<":  Lt,
	"<=": Le,
	">":  Gt,
	">=": Ge,
	"=":  Eq,
	"!=": Ne,
	"<>": Ne,
}
+
// parseComparisonOp parses a chain of comparison operations — symbolic
// operators, [NOT] LIKE, and [NOT] BETWEEN ... AND ... — applied
// left-associatively to arithmetic operands.
func (p *parser) parseComparisonOp() (Expr, *parseError) {
	debugf("parseComparisonOp: %v", p)

	expr, err := p.parseArithOp()
	if err != nil {
		return nil, err
	}

	for {
		// There's a need for two token lookahead.
		var op ComparisonOperator
		var rhs2 bool // whether a second operand (BETWEEN's AND arm) follows
		if p.eat("NOT", "LIKE") {
			op = NotLike
		} else if p.eat("NOT", "BETWEEN") {
			op, rhs2 = NotBetween, true
		} else if p.eat("LIKE") {
			op = Like
		} else if p.eat("BETWEEN") {
			op, rhs2 = Between, true
		} else {
			// Check for a symbolic operator.
			tok := p.next()
			if tok.err != nil {
				p.back()
				break
			}
			var ok bool
			op, ok = symbolicOperators[tok.value]
			if !ok {
				// Not a comparison; un-consume and stop the chain.
				p.back()
				break
			}
		}

		rhs, err := p.parseArithOp()
		if err != nil {
			return nil, err
		}
		co := ComparisonOp{LHS: expr, Op: op, RHS: rhs}

		// BETWEEN x AND y: parse the second bound.
		if rhs2 {
			if err := p.expect("AND"); err != nil {
				return nil, err
			}
			rhs2, err := p.parseArithOp()
			if err != nil {
				return nil, err
			}
			co.RHS2 = rhs2
		}

		expr = co
	}
	return expr, nil
}
+
// parseArithOp parses arithmetic expressions, entering the operator chain
// at its lowest precedence level (bitwise OR).
func (p *parser) parseArithOp() (Expr, *parseError) {
	return bitOrParser.parse(p)
}
+
// unaryArithOperators maps unary operator tokens to their ArithOperator
// values.
var unaryArithOperators = map[string]ArithOperator{
	"-": Neg,
	"~": BitNot,
	"+": Plus,
}
+
// parseUnaryArithOp parses an optional unary operator (-, ~, +) applied to a
// literal. A sign immediately followed by a numeric token is folded into a
// signed literal rather than an ArithOp node.
func (p *parser) parseUnaryArithOp() (Expr, *parseError) {
	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}

	op := tok.value

	if op == "-" || op == "+" {
		// If the next token is a numeric token, combine and parse as a literal.
		ntok := p.next()
		if ntok.err == nil {
			switch ntok.typ {
			case int64Token:
				comb := op + ntok.value
				n, err := strconv.ParseInt(comb, ntok.int64Base, 64)
				if err != nil {
					return nil, p.errorf("%v", err)
				}
				return IntegerLiteral(n), nil
			case float64Token:
				f := ntok.float64
				if op == "-" {
					f = -f
				}
				return FloatLiteral(f), nil
			}
		}
		// Not numeric: un-consume the lookahead token.
		// It is not possible for the p.back() lower down to fire
		// because - and + are in unaryArithOperators.
		p.back()
	}

	if op, ok := unaryArithOperators[op]; ok {
		e, err := p.parseLit()
		if err != nil {
			return nil, err
		}
		return ArithOp{Op: op, RHS: e}, nil
	}
	// Not a unary operator at all; un-consume and parse a plain literal.
	p.back()

	return p.parseLit()
}
+
// parseLit parses the highest-precedence expression forms: numeric, string
// and bytes literals; parenthesized expressions; function invocations;
// keyword values (TRUE/FALSE/NULL/*); conditional expressions; typed
// literals (ARRAY/DATE/TIMESTAMP/JSON); @parameters; and finally path
// expressions or bare identifiers.
func (p *parser) parseLit() (Expr, *parseError) {
	tok := p.next()
	if tok.err != nil {
		return nil, tok.err
	}

	switch tok.typ {
	case int64Token:
		n, err := strconv.ParseInt(tok.value, tok.int64Base, 64)
		if err != nil {
			return nil, p.errorf("%v", err)
		}
		return IntegerLiteral(n), nil
	case float64Token:
		return FloatLiteral(tok.float64), nil
	case stringToken:
		return StringLiteral(tok.string), nil
	case bytesToken:
		return BytesLiteral(tok.string), nil
	}

	// Handle parenthesized expressions.
	if tok.value == "(" {
		e, err := p.parseExpr()
		if err != nil {
			return nil, err
		}
		if err := p.expect(")"); err != nil {
			return nil, err
		}
		return Paren{Expr: e}, nil
	}

	// If the literal was an identifier, and there's an open paren next,
	// this is a function invocation.
	// The `funcs` map is keyed by upper case strings.
	if name := strings.ToUpper(tok.value); funcs[name] && p.sniff("(") {
		if aggregateFuncs[name] {
			// Aggregate functions have their own syntax (DISTINCT, HAVING, ...).
			p.back()
			return p.parseAggregateFunc()
		}
		var list []Expr
		var err *parseError
		// Some functions (CAST, EXTRACT, ...) need special argument parsing.
		if f, ok := funcArgParsers[name]; ok {
			list, err = p.parseParenExprListWithParseFunc(f)
		} else {
			list, err = p.parseParenExprList()
		}
		if err != nil {
			return nil, err
		}
		return Func{
			Name: name,
			Args: list,
		}, nil
	}

	// Handle some reserved keywords and special tokens that become specific values.
	switch {
	case tok.caseEqual("TRUE"):
		return True, nil
	case tok.caseEqual("FALSE"):
		return False, nil
	case tok.caseEqual("NULL"):
		return Null, nil
	case tok.value == "*":
		return Star, nil
	default:
		// TODO: Check IsKeyWord(tok.value), and return a good error?
	}

	// Handle conditional expressions.
	// Each sub-parser re-reads its leading keyword, so push the token back.
	switch {
	case tok.caseEqual("CASE"):
		p.back()
		return p.parseCaseExpr()
	case tok.caseEqual("COALESCE"):
		p.back()
		return p.parseCoalesceExpr()
	case tok.caseEqual("IF"):
		p.back()
		return p.parseIfExpr()
	case tok.caseEqual("IFNULL"):
		p.back()
		return p.parseIfNullExpr()
	case tok.caseEqual("NULLIF"):
		p.back()
		return p.parseNullIfExpr()
	}

	// Handle typed literals.
	// DATE/TIMESTAMP/JSON are only typed literals when followed by a string;
	// otherwise they fall through (e.g. as a column named "date").
	switch {
	case tok.caseEqual("ARRAY") || tok.value == "[":
		p.back()
		return p.parseArrayLit()
	case tok.caseEqual("DATE"):
		if p.sniffTokenType(stringToken) {
			p.back()
			return p.parseDateLit()
		}
	case tok.caseEqual("TIMESTAMP"):
		if p.sniffTokenType(stringToken) {
			p.back()
			return p.parseTimestampLit()
		}
	case tok.caseEqual("JSON"):
		if p.sniffTokenType(stringToken) {
			p.back()
			return p.parseJSONLit()
		}
	}

	// TODO: struct literals

	// Try a parameter.
	// TODO: check character sets.
	if strings.HasPrefix(tok.value, "@") {
		return Param(tok.value[1:]), nil
	}

	// Only thing left is a path expression or standalone identifier.
	p.back()
	pe, err := p.parsePathExp()
	if err != nil {
		return nil, err
	}
	if len(pe) == 1 {
		return pe[0], nil // identifier
	}
	return pe, nil
}
+
+func (p *parser) parseCaseExpr() (Case, *parseError) {
+ if err := p.expect("CASE"); err != nil {
+ return Case{}, err
+ }
+
+ var expr Expr
+ if !p.sniff("WHEN") {
+ var err *parseError
+ expr, err = p.parseExpr()
+ if err != nil {
+ return Case{}, err
+ }
+ }
+
+ when, err := p.parseWhenClause()
+ if err != nil {
+ return Case{}, err
+ }
+ whens := []WhenClause{when}
+ for p.sniff("WHEN") {
+ when, err := p.parseWhenClause()
+ if err != nil {
+ return Case{}, err
+ }
+ whens = append(whens, when)
+ }
+
+ var elseResult Expr
+ if p.sniff("ELSE") {
+ p.eat("ELSE")
+ var err *parseError
+ elseResult, err = p.parseExpr()
+ if err != nil {
+ return Case{}, err
+ }
+ }
+
+ if err := p.expect("END"); err != nil {
+ return Case{}, err
+ }
+
+ return Case{
+ Expr: expr,
+ WhenClauses: whens,
+ ElseResult: elseResult,
+ }, nil
+}
+
// parseWhenClause parses one "WHEN cond THEN result" arm of a CASE
// expression.
func (p *parser) parseWhenClause() (WhenClause, *parseError) {
	if err := p.expect("WHEN"); err != nil {
		return WhenClause{}, err
	}
	cond, err := p.parseExpr()
	if err != nil {
		return WhenClause{}, err
	}
	if err := p.expect("THEN"); err != nil {
		return WhenClause{}, err
	}
	result, err := p.parseExpr()
	if err != nil {
		return WhenClause{}, err
	}
	return WhenClause{Cond: cond, Result: result}, nil
}
+
+func (p *parser) parseCoalesceExpr() (Coalesce, *parseError) {
+ if err := p.expect("COALESCE"); err != nil {
+ return Coalesce{}, err
+ }
+ exprList, err := p.parseParenExprList()
+ if err != nil {
+ return Coalesce{}, err
+ }
+ return Coalesce{ExprList: exprList}, nil
+}
+
// parseIfExpr parses IF(cond, true_result, else_result).
func (p *parser) parseIfExpr() (If, *parseError) {
	if err := p.expect("IF", "("); err != nil {
		return If{}, err
	}

	// The condition must be a boolean expression.
	expr, err := p.parseBoolExpr()
	if err != nil {
		return If{}, err
	}
	if err := p.expect(","); err != nil {
		return If{}, err
	}

	trueResult, err := p.parseExpr()
	if err != nil {
		return If{}, err
	}
	if err := p.expect(","); err != nil {
		return If{}, err
	}

	elseResult, err := p.parseExpr()
	if err != nil {
		return If{}, err
	}
	if err := p.expect(")"); err != nil {
		return If{}, err
	}

	return If{Expr: expr, TrueResult: trueResult, ElseResult: elseResult}, nil
}
+
// parseIfNullExpr parses IFNULL(expr, null_result).
func (p *parser) parseIfNullExpr() (IfNull, *parseError) {
	if err := p.expect("IFNULL", "("); err != nil {
		return IfNull{}, err
	}

	expr, err := p.parseExpr()
	if err != nil {
		return IfNull{}, err
	}
	if err := p.expect(","); err != nil {
		return IfNull{}, err
	}

	nullResult, err := p.parseExpr()
	if err != nil {
		return IfNull{}, err
	}
	if err := p.expect(")"); err != nil {
		return IfNull{}, err
	}

	return IfNull{Expr: expr, NullResult: nullResult}, nil
}
+
// parseNullIfExpr parses NULLIF(expr, expr_to_match).
func (p *parser) parseNullIfExpr() (NullIf, *parseError) {
	if err := p.expect("NULLIF", "("); err != nil {
		return NullIf{}, err
	}

	expr, err := p.parseExpr()
	if err != nil {
		return NullIf{}, err
	}
	if err := p.expect(","); err != nil {
		return NullIf{}, err
	}

	exprToMatch, err := p.parseExpr()
	if err != nil {
		return NullIf{}, err
	}
	if err := p.expect(")"); err != nil {
		return NullIf{}, err
	}

	return NullIf{Expr: expr, ExprToMatch: exprToMatch}, nil
}
+
// parseArrayLit parses an array literal: [elem, ...] with an optional
// leading ARRAY keyword. Elements are restricted to literals.
func (p *parser) parseArrayLit() (Array, *parseError) {
	// ARRAY keyword is optional.
	// TODO: If it is present, consume any <T> after it.
	p.eat("ARRAY")

	var arr Array
	err := p.parseCommaList("[", "]", func(p *parser) *parseError {
		e, err := p.parseLit()
		if err != nil {
			return err
		}
		// TODO: Do type consistency checking here?
		arr = append(arr, e)
		return nil
	})
	return arr, err
}
+
+// TODO: There should be exported Parse{Date,Timestamp}Literal package-level funcs
+// to support spannertest coercing plain string literals when used in a typed context.
+// Those should wrap parseDateLit and parseTimestampLit below.
+
// parseDateLit parses a DATE 'yyyy-mm-dd' typed literal.
func (p *parser) parseDateLit() (DateLiteral, *parseError) {
	if err := p.expect("DATE"); err != nil {
		return DateLiteral{}, err
	}
	s, err := p.parseStringLit()
	if err != nil {
		return DateLiteral{}, err
	}
	d, perr := civil.ParseDate(string(s))
	if perr != nil {
		return DateLiteral{}, p.errorf("bad date literal %q: %v", s, perr)
	}
	// TODO: Enforce valid range.
	return DateLiteral(d), nil
}
+
// TODO: A manual parser is probably better than this.
// There are a lot of variations that this does not handle.
// timestampFormats lists the layouts tried, in order, when parsing a
// TIMESTAMP literal; the first that parses wins.
var timestampFormats = []string{
	// 'YYYY-[M]M-[D]D [[H]H:[M]M:[S]S[.DDDDDD] [timezone]]'
	"2006-01-02",
	"2006-01-02 15:04:05",
	"2006-01-02 15:04:05.000000",
	"2006-01-02 15:04:05-07:00",
	"2006-01-02 15:04:05.000000-07:00",
}
+
// defaultLocation is the time zone applied to TIMESTAMP literals without an
// explicit offset. The docs say "America/Los_Angeles" is the default; fall
// back to UTC when timezone data is unavailable.
var defaultLocation = func() *time.Location {
	if loc, err := time.LoadLocation("America/Los_Angeles"); err == nil {
		return loc
	}
	return time.UTC
}()
+
// parseTimestampLit parses a TIMESTAMP 'string' typed literal, trying each
// layout in timestampFormats; times without an explicit zone are interpreted
// in defaultLocation.
func (p *parser) parseTimestampLit() (TimestampLiteral, *parseError) {
	if err := p.expect("TIMESTAMP"); err != nil {
		return TimestampLiteral{}, err
	}
	s, err := p.parseStringLit()
	if err != nil {
		return TimestampLiteral{}, err
	}
	for _, format := range timestampFormats {
		t, err := time.ParseInLocation(format, string(s), defaultLocation)
		if err == nil {
			// TODO: Enforce valid range.
			return TimestampLiteral(t), nil
		}
	}
	return TimestampLiteral{}, p.errorf("invalid timestamp literal %q", s)
}
+
// parseJSONLit parses a JSON 'string' typed literal. The string content is
// not validated as JSON (see comment below).
func (p *parser) parseJSONLit() (JSONLiteral, *parseError) {
	if err := p.expect("JSON"); err != nil {
		return JSONLiteral{}, err
	}
	s, err := p.parseStringLit()
	if err != nil {
		return JSONLiteral{}, err
	}
	// It is not guaranteed that the returned JSONLiteral is a valid JSON document
	// to avoid error due to parsing SQL generated with an invalid JSONLiteral like JSONLiteral("")
	return JSONLiteral(s), nil
}
+
+func (p *parser) parseStringLit() (StringLiteral, *parseError) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+ if tok.typ != stringToken {
+ return "", p.errorf("got %q, want string literal", tok.value)
+ }
+ return StringLiteral(tok.string), nil
+}
+
+func (p *parser) parsePathExp() (PathExp, *parseError) {
+ var pe PathExp
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return nil, tok.err
+ }
+ switch tok.typ {
+ case quotedID:
+ pe = append(pe, ID(tok.string))
+ case unquotedID:
+ pe = append(pe, ID(tok.value))
+ default:
+ // TODO: Is this correct?
+ return nil, p.errorf("expected identifer")
+ }
+ if !p.eat(".") {
+ break
+ }
+ }
+ return pe, nil
+}
+
// parseBoolExpr parses an expression and requires it to be a boolean
// expression (one implementing BoolExpr).
func (p *parser) parseBoolExpr() (BoolExpr, *parseError) {
	expr, err := p.parseExpr()
	if err != nil {
		return nil, err
	}
	be, ok := expr.(BoolExpr)
	if !ok {
		return nil, p.errorf("got non-bool expression %T", expr)
	}
	return be, nil
}
+
// parseAlias parses an alias following AS.
func (p *parser) parseAlias() (ID, *parseError) {
	// The docs don't specify what lexical token is valid for an alias,
	// but it seems likely that it is an identifier.
	return p.parseTableOrIndexOrColumnName()
}
+
// parseHints parses a hint expression body "{k=v [, ...]}", merging the
// parsed pairs into hints (allocating the map if nil) and returning it.
// The caller has already consumed the leading "@".
func (p *parser) parseHints(hints map[string]string) (map[string]string, *parseError) {
	if hints == nil {
		hints = map[string]string{}
	}
	if err := p.expect("{"); err != nil {
		return nil, err
	}
	for {
		// Permit an empty hint body or a trailing comma before "}".
		if p.sniff("}") {
			break
		}
		tok := p.next()
		if tok.err != nil {
			return nil, tok.err
		}
		k := tok.value
		if err := p.expect("="); err != nil {
			return nil, err
		}
		tok = p.next()
		if tok.err != nil {
			return nil, tok.err
		}
		v := tok.value
		hints[k] = v
		if !p.eat(",") {
			break
		}
	}
	if err := p.expect("}"); err != nil {
		return nil, err
	}
	return hints, nil
}
+
// parseTableOrIndexOrColumnName parses a single (quoted or unquoted)
// identifier as used for table, index, column and role names.
func (p *parser) parseTableOrIndexOrColumnName() (ID, *parseError) {
	/*
		table_name and column_name and index_name and role_name:
				{a—z|A—Z}[{a—z|A—Z|0—9|_}+]
	*/

	tok := p.next()
	if tok.err != nil {
		return "", tok.err
	}
	switch tok.typ {
	case quotedID:
		return ID(tok.string), nil
	case unquotedID:
		// TODO: enforce restrictions
		return ID(tok.value), nil
	default:
		return "", p.errorf("expected identifier")
	}
}
+
// parseOnDelete parses the action of an ON DELETE clause:
// either CASCADE or NO ACTION.
func (p *parser) parseOnDelete() (OnDelete, *parseError) {
	/*
		CASCADE
		NO ACTION
	*/

	tok := p.next()
	if tok.err != nil {
		return 0, tok.err
	}
	if tok.caseEqual("CASCADE") {
		return CascadeOnDelete, nil
	}
	if !tok.caseEqual("NO") {
		return 0, p.errorf("got %q, want NO or CASCADE", tok.value)
	}
	if err := p.expect("ACTION"); err != nil {
		return 0, err
	}
	return NoActionOnDelete, nil
}
+
// parseRowDeletionPolicy parses the body of a ROW DELETION POLICY clause:
// "( OLDER_THAN ( column, INTERVAL n DAY ) )".
func (p *parser) parseRowDeletionPolicy() (RowDeletionPolicy, *parseError) {
	if err := p.expect("(", "OLDER_THAN", "("); err != nil {
		return RowDeletionPolicy{}, err
	}
	cname, err := p.parseTableOrIndexOrColumnName()
	if err != nil {
		return RowDeletionPolicy{}, err
	}
	if err := p.expect(",", "INTERVAL"); err != nil {
		return RowDeletionPolicy{}, err
	}
	tok := p.next()
	if tok.err != nil {
		return RowDeletionPolicy{}, tok.err
	}
	if tok.typ != int64Token {
		return RowDeletionPolicy{}, p.errorf("got %q, expected int64 token", tok.value)
	}
	n, serr := strconv.ParseInt(tok.value, tok.int64Base, 64)
	if serr != nil {
		return RowDeletionPolicy{}, p.errorf("%v", serr)
	}
	// Only DAY granularity is supported.
	if err := p.expect("DAY", ")", ")"); err != nil {
		return RowDeletionPolicy{}, err
	}
	return RowDeletionPolicy{
		Column:  cname,
		NumDays: n,
	}, nil
}
+
+// parseCommaList parses a comma-separated list enclosed by bra and ket,
+// delegating to f for the individual element parsing.
+// Only invoke this with symbols as bra/ket; they are matched literally, not case insensitively.
+func (p *parser) parseCommaList(bra, ket string, f func(*parser) *parseError) *parseError {
+ if err := p.expect(bra); err != nil {
+ return err
+ }
+ for {
+ if p.eat(ket) {
+ return nil
+ }
+
+ err := f(p)
+ if err != nil {
+ return err
+ }
+
+ // ket or "," should be next.
+ tok := p.next()
+ if tok.err != nil {
+ return err
+ }
+ if tok.value == ket {
+ return nil
+ } else if tok.value == "," {
+ continue
+ } else {
+ return p.errorf(`got %q, want %q or ","`, tok.value, ket)
+ }
+ }
+}
+
+// parseCommaListWithEnds parses a comma-separated list to expected ends,
+// delegating to f for the individual element parsing.
+// Only invoke this with symbols as end; they are matched case insensitively.
+func (p *parser) parseCommaListWithEnds(f func(*parser) *parseError, end ...string) *parseError {
+ if p.eat(end...) {
+ return nil
+ }
+ for {
+ err := f(p)
+ if err != nil {
+ return err
+ }
+ if p.eat(end...) {
+ return nil
+ }
+
+ tok := p.next()
+ if tok.err != nil {
+ return err
+ }
+ if tok.value == "," {
+ continue
+ } else if tok.value == ";" {
+ return nil
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/sql.go b/vendor/cloud.google.com/go/spanner/spansql/sql.go
new file mode 100644
index 000000000..577a45e2e
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spansql/sql.go
@@ -0,0 +1,1183 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spansql
+
+// This file holds SQL methods for rendering the types in types.go
+// as the SQL dialect that this package parses.
+//
+// Every exported type has an SQL method that returns a string.
+// Some also have an addSQL method that efficiently builds that string
+// in a provided strings.Builder.
+
+import (
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+func buildSQL(x interface{ addSQL(*strings.Builder) }) string {
+ var sb strings.Builder
+ x.addSQL(&sb)
+ return sb.String()
+}
+
+func (ct CreateTable) SQL() string {
+ str := "CREATE TABLE "
+ if ct.IfNotExists {
+ str += "IF NOT EXISTS "
+ }
+ str += ct.Name.SQL() + " (\n"
+ for _, c := range ct.Columns {
+ str += " " + c.SQL() + ",\n"
+ }
+ for _, tc := range ct.Constraints {
+ str += " " + tc.SQL() + ",\n"
+ }
+ if len(ct.Synonym) > 0 {
+ str += " SYNONYM(" + ct.Synonym.SQL() + "),\n"
+ }
+ str += ") PRIMARY KEY("
+ for i, c := range ct.PrimaryKey {
+ if i > 0 {
+ str += ", "
+ }
+ str += c.SQL()
+ }
+ str += ")"
+ if il := ct.Interleave; il != nil {
+ str += ",\n INTERLEAVE IN PARENT " + il.Parent.SQL() + " ON DELETE " + il.OnDelete.SQL()
+ }
+ if rdp := ct.RowDeletionPolicy; rdp != nil {
+ str += ",\n " + rdp.SQL()
+ }
+ return str
+}
+
+func (ci CreateIndex) SQL() string {
+ str := "CREATE"
+ if ci.Unique {
+ str += " UNIQUE"
+ }
+ if ci.NullFiltered {
+ str += " NULL_FILTERED"
+ }
+ str += " INDEX "
+ if ci.IfNotExists {
+ str += "IF NOT EXISTS "
+ }
+ str += ci.Name.SQL() + " ON " + ci.Table.SQL() + "("
+ for i, c := range ci.Columns {
+ if i > 0 {
+ str += ", "
+ }
+ str += c.SQL()
+ }
+ str += ")"
+ if len(ci.Storing) > 0 {
+ str += " STORING (" + idList(ci.Storing, ", ") + ")"
+ }
+ if ci.Interleave != "" {
+ str += ", INTERLEAVE IN " + ci.Interleave.SQL()
+ }
+ return str
+}
+
+func (cv CreateView) SQL() string {
+ str := "CREATE"
+ if cv.OrReplace {
+ str += " OR REPLACE"
+ }
+ str += " VIEW " + cv.Name.SQL() + " SQL SECURITY " + cv.SecurityType.SQL() + " AS " + cv.Query.SQL()
+ return str
+}
+
+func (st SecurityType) SQL() string {
+ switch st {
+ case Invoker:
+ return "INVOKER"
+ case Definer:
+ return "DEFINER"
+ }
+ panic("unknown SecurityType")
+}
+
+func (cr CreateRole) SQL() string {
+ return "CREATE ROLE " + cr.Name.SQL()
+}
+
+// SQL renders the CREATE CHANGE STREAM statement. The FOR clause is
+// "FOR ALL" when WatchAllTables is set, a comma-separated list of watch
+// definitions when Watch is non-empty, and omitted otherwise.
+func (cs CreateChangeStream) SQL() string {
+	str := "CREATE CHANGE STREAM " + cs.Name.SQL()
+	switch {
+	case cs.WatchAllTables:
+		str += " FOR ALL"
+	case len(cs.Watch) > 0:
+		defs := make([]string, len(cs.Watch))
+		for i, table := range cs.Watch {
+			defs[i] = table.SQL()
+		}
+		str += " FOR " + strings.Join(defs, ", ")
+	}
+	// Only emit OPTIONS when at least one option is set.
+	if cs.Options != (ChangeStreamOptions{}) {
+		str += " " + cs.Options.SQL()
+	}
+
+	return str
+}
+
+// SQL renders a change stream watch definition: the table name alone when
+// all columns are watched, otherwise the table name followed by the
+// parenthesized column list (possibly empty).
+func (w WatchDef) SQL() string {
+	if w.WatchAllCols {
+		return w.Table.SQL()
+	}
+	cols := make([]string, len(w.Columns))
+	for i, c := range w.Columns {
+		cols[i] = c.SQL()
+	}
+	return w.Table.SQL() + "(" + strings.Join(cols, ", ") + ")"
+}
+
+func (dt DropTable) SQL() string {
+ str := "DROP TABLE "
+ if dt.IfExists {
+ str += "IF EXISTS "
+ }
+ str += dt.Name.SQL()
+ return str
+}
+
+func (di DropIndex) SQL() string {
+ str := "DROP INDEX "
+ if di.IfExists {
+ str += "IF EXISTS "
+ }
+ str += di.Name.SQL()
+ return str
+}
+
+func (dv DropView) SQL() string {
+ return "DROP VIEW " + dv.Name.SQL()
+}
+
+func (dr DropRole) SQL() string {
+ return "DROP ROLE " + dr.Name.SQL()
+}
+
+// SQL renders the GRANT statement. Exactly one grant form is emitted,
+// chosen in priority order: table privileges, table functions, views,
+// change streams, then role membership.
+func (gr GrantRole) SQL() string {
+	sql := "GRANT "
+	// Note: this deliberately tests Privileges != nil (not len > 0), so a
+	// non-nil empty slice still selects the "ON TABLE" form.
+	if gr.Privileges != nil {
+		for i, priv := range gr.Privileges {
+			if i > 0 {
+				sql += ", "
+			}
+			sql += priv.Type.SQL()
+			// A nil Columns means the privilege applies to the whole table.
+			if priv.Columns != nil {
+				sql += "(" + idList(priv.Columns, ", ") + ")"
+			}
+		}
+		sql += " ON TABLE " + idList(gr.TableNames, ", ")
+	} else if len(gr.TvfNames) > 0 {
+		sql += "EXECUTE ON TABLE FUNCTION " + idList(gr.TvfNames, ", ")
+	} else if len(gr.ViewNames) > 0 {
+		sql += "SELECT ON VIEW " + idList(gr.ViewNames, ", ")
+	} else if len(gr.ChangeStreamNames) > 0 {
+		sql += "SELECT ON CHANGE STREAM " + idList(gr.ChangeStreamNames, ", ")
+	} else {
+		// Fallback: granting role membership rather than object privileges.
+		sql += "ROLE " + idList(gr.GrantRoleNames, ", ")
+	}
+	sql += " TO ROLE " + idList(gr.ToRoleNames, ", ")
+	return sql
+}
+
+// SQL renders the REVOKE statement. It mirrors GrantRole.SQL: exactly one
+// revoke form is emitted, chosen in priority order: table privileges,
+// table functions, views, change streams, then role membership.
+func (rr RevokeRole) SQL() string {
+	sql := "REVOKE "
+	// Note: this deliberately tests Privileges != nil (not len > 0), so a
+	// non-nil empty slice still selects the "ON TABLE" form.
+	if rr.Privileges != nil {
+		for i, priv := range rr.Privileges {
+			if i > 0 {
+				sql += ", "
+			}
+			sql += priv.Type.SQL()
+			// A nil Columns means the privilege applies to the whole table.
+			if priv.Columns != nil {
+				sql += "(" + idList(priv.Columns, ", ") + ")"
+			}
+		}
+		sql += " ON TABLE " + idList(rr.TableNames, ", ")
+	} else if len(rr.TvfNames) > 0 {
+		sql += "EXECUTE ON TABLE FUNCTION " + idList(rr.TvfNames, ", ")
+	} else if len(rr.ViewNames) > 0 {
+		sql += "SELECT ON VIEW " + idList(rr.ViewNames, ", ")
+	} else if len(rr.ChangeStreamNames) > 0 {
+		sql += "SELECT ON CHANGE STREAM " + idList(rr.ChangeStreamNames, ", ")
+	} else {
+		// Fallback: revoking role membership rather than object privileges.
+		sql += "ROLE " + idList(rr.RevokeRoleNames, ", ")
+	}
+	sql += " FROM ROLE " + idList(rr.FromRoleNames, ", ")
+	return sql
+}
+
+func (dc DropChangeStream) SQL() string {
+ return "DROP CHANGE STREAM " + dc.Name.SQL()
+}
+
+func (acs AlterChangeStream) SQL() string {
+ return "ALTER CHANGE STREAM " + acs.Name.SQL() + " " + acs.Alteration.SQL()
+}
+
+func (scsw AlterWatch) SQL() string {
+ str := "SET FOR "
+ if scsw.WatchAllTables {
+ return str + "ALL"
+ }
+ for i, table := range scsw.Watch {
+ if i > 0 {
+ str += ", "
+ }
+ str += table.SQL()
+ }
+ return str
+}
+
+func (ao AlterChangeStreamOptions) SQL() string {
+ return "SET " + ao.Options.SQL()
+}
+
+func (dcsw DropChangeStreamWatch) SQL() string {
+ return "DROP FOR ALL"
+}
+
+func (cso ChangeStreamOptions) SQL() string {
+ str := "OPTIONS ("
+ hasOpt := false
+ if cso.RetentionPeriod != nil {
+ hasOpt = true
+ str += fmt.Sprintf("retention_period='%s'", *cso.RetentionPeriod)
+ }
+ if cso.ValueCaptureType != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ str += fmt.Sprintf("value_capture_type='%s'", *cso.ValueCaptureType)
+ }
+ str += ")"
+ return str
+}
+
+func (at AlterTable) SQL() string {
+ return "ALTER TABLE " + at.Name.SQL() + " " + at.Alteration.SQL()
+}
+
+func (ac AddColumn) SQL() string {
+ str := "ADD COLUMN "
+ if ac.IfNotExists {
+ str += "IF NOT EXISTS "
+ }
+ str += ac.Def.SQL()
+ return str
+}
+
+func (dc DropColumn) SQL() string {
+ return "DROP COLUMN " + dc.Name.SQL()
+}
+
+func (ac AddConstraint) SQL() string {
+ return "ADD " + ac.Constraint.SQL()
+}
+
+func (dc DropConstraint) SQL() string {
+ return "DROP CONSTRAINT " + dc.Name.SQL()
+}
+
+func (rt RenameTo) SQL() string {
+ str := "RENAME TO " + rt.ToName.SQL()
+ if len(rt.Synonym) > 0 {
+ str += ", ADD SYNONYM " + rt.Synonym.SQL()
+ }
+ return str
+}
+
+func (as AddSynonym) SQL() string {
+ return "ADD SYNONYM " + as.Name.SQL()
+}
+
+func (ds DropSynonym) SQL() string {
+ return "DROP SYNONYM " + ds.Name.SQL()
+}
+
+func (sod SetOnDelete) SQL() string {
+ return "SET ON DELETE " + sod.Action.SQL()
+}
+
+func (od OnDelete) SQL() string {
+ switch od {
+ case NoActionOnDelete:
+ return "NO ACTION"
+ case CascadeOnDelete:
+ return "CASCADE"
+ }
+ panic("unknown OnDelete")
+}
+
+func (ac AlterColumn) SQL() string {
+ return "ALTER COLUMN " + ac.Name.SQL() + " " + ac.Alteration.SQL()
+}
+
+func (ardp AddRowDeletionPolicy) SQL() string {
+ return "ADD " + ardp.RowDeletionPolicy.SQL()
+}
+
+func (rrdp ReplaceRowDeletionPolicy) SQL() string {
+ return "REPLACE " + rrdp.RowDeletionPolicy.SQL()
+}
+
+func (drdp DropRowDeletionPolicy) SQL() string {
+ return "DROP ROW DELETION POLICY"
+}
+
+func (sct SetColumnType) SQL() string {
+ str := sct.Type.SQL()
+ if sct.NotNull {
+ str += " NOT NULL"
+ }
+ if sct.Default != nil {
+ str += " DEFAULT (" + sct.Default.SQL() + ")"
+ }
+ return str
+}
+
+func (sco SetColumnOptions) SQL() string {
+ // TODO: not clear what to do for no options.
+ return "SET " + sco.Options.SQL()
+}
+
+func (sd SetDefault) SQL() string {
+ return "SET DEFAULT (" + sd.Default.SQL() + ")"
+}
+
+func (dp DropDefault) SQL() string {
+ return "DROP DEFAULT"
+}
+
+func (co ColumnOptions) SQL() string {
+ str := "OPTIONS ("
+ if co.AllowCommitTimestamp != nil {
+ if *co.AllowCommitTimestamp {
+ str += "allow_commit_timestamp = true"
+ } else {
+ str += "allow_commit_timestamp = null"
+ }
+ }
+ str += ")"
+ return str
+}
+
+func (rt RenameTable) SQL() string {
+ str := "RENAME TABLE "
+ for i, op := range rt.TableRenameOps {
+ if i > 0 {
+ str += ", "
+ }
+ str += op.FromName.SQL() + " TO " + op.ToName.SQL()
+ }
+ return str
+}
+
+func (ad AlterDatabase) SQL() string {
+ return "ALTER DATABASE " + ad.Name.SQL() + " " + ad.Alteration.SQL()
+}
+
+func (sdo SetDatabaseOptions) SQL() string {
+ return "SET " + sdo.Options.SQL()
+}
+
+func (do DatabaseOptions) SQL() string {
+ str := "OPTIONS ("
+ hasOpt := false
+ if do.OptimizerVersion != nil {
+ hasOpt = true
+ if *do.OptimizerVersion == 0 {
+ str += "optimizer_version=null"
+ } else {
+ str += fmt.Sprintf("optimizer_version=%v", *do.OptimizerVersion)
+ }
+ }
+ if do.OptimizerStatisticsPackage != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ if *do.OptimizerStatisticsPackage == "" {
+ str += "optimizer_statistics_package=null"
+ } else {
+ str += fmt.Sprintf("optimizer_statistics_package='%s'", *do.OptimizerStatisticsPackage)
+ }
+ }
+ if do.VersionRetentionPeriod != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ if *do.VersionRetentionPeriod == "" {
+ str += "version_retention_period=null"
+ } else {
+ str += fmt.Sprintf("version_retention_period='%s'", *do.VersionRetentionPeriod)
+ }
+ }
+ if do.EnableKeyVisualizer != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ if *do.EnableKeyVisualizer {
+ str += "enable_key_visualizer=true"
+ } else {
+ str += "enable_key_visualizer=null"
+ }
+ }
+ if do.DefaultLeader != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ if *do.DefaultLeader == "" {
+ str += "default_leader=null"
+ } else {
+ str += fmt.Sprintf("default_leader='%s'", *do.DefaultLeader)
+ }
+ }
+ str += ")"
+ return str
+}
+
+func (as AlterStatistics) SQL() string {
+ return "ALTER STATISTICS " + as.Name.SQL() + " " + as.Alteration.SQL()
+}
+
+func (sso SetStatisticsOptions) SQL() string {
+ return "SET " + sso.Options.SQL()
+}
+
+func (sa StatisticsOptions) SQL() string {
+ str := "OPTIONS ("
+ if sa.AllowGC != nil {
+ str += fmt.Sprintf("allow_gc=%v", *sa.AllowGC)
+ }
+ str += ")"
+ return str
+}
+
+func (ai AlterIndex) SQL() string {
+ return "ALTER INDEX " + ai.Name.SQL() + " " + ai.Alteration.SQL()
+}
+
+func (asc AddStoredColumn) SQL() string {
+ return "ADD STORED COLUMN " + asc.Name.SQL()
+}
+
+func (dsc DropStoredColumn) SQL() string {
+ return "DROP STORED COLUMN " + dsc.Name.SQL()
+}
+
+func (cs CreateSequence) SQL() string {
+ str := "CREATE SEQUENCE "
+ if cs.IfNotExists {
+ str += "IF NOT EXISTS "
+ }
+ return str + cs.Name.SQL() + " " + cs.Options.SQL()
+}
+
+func (as AlterSequence) SQL() string {
+ return "ALTER SEQUENCE " + as.Name.SQL() + " " + as.Alteration.SQL()
+}
+
+func (sa SetSequenceOptions) SQL() string {
+ return "SET " + sa.Options.SQL()
+}
+
+func (so SequenceOptions) SQL() string {
+ str := "OPTIONS ("
+ hasOpt := false
+ if so.SequenceKind != nil {
+ hasOpt = true
+ str += fmt.Sprintf("sequence_kind='%s'", *so.SequenceKind)
+ }
+ if so.SkipRangeMin != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ str += fmt.Sprintf("skip_range_min=%v", *so.SkipRangeMin)
+ }
+ if so.SkipRangeMax != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ str += fmt.Sprintf("skip_range_max=%v", *so.SkipRangeMax)
+ }
+ if so.StartWithCounter != nil {
+ if hasOpt {
+ str += ", "
+ }
+ hasOpt = true
+ str += fmt.Sprintf("start_with_counter=%v", *so.StartWithCounter)
+ }
+ return str + ")"
+}
+
+func (do DropSequence) SQL() string {
+ str := "DROP SEQUENCE "
+ if do.IfExists {
+ str += "IF EXISTS "
+ }
+ return str + do.Name.SQL()
+}
+
+func (d *Delete) SQL() string {
+ return "DELETE FROM " + d.Table.SQL() + " WHERE " + d.Where.SQL()
+}
+
+func (u *Update) SQL() string {
+ str := "UPDATE " + u.Table.SQL() + " SET "
+ for i, item := range u.Items {
+ if i > 0 {
+ str += ", "
+ }
+ str += item.Column.SQL() + " = "
+ if item.Value != nil {
+ str += item.Value.SQL()
+ } else {
+ str += "DEFAULT"
+ }
+ }
+ str += " WHERE " + u.Where.SQL()
+ return str
+}
+
+func (i *Insert) SQL() string {
+ str := "INSERT INTO " + i.Table.SQL() + " ("
+ for i, column := range i.Columns {
+ if i > 0 {
+ str += ", "
+ }
+ str += column.SQL()
+ }
+ str += ") "
+ str += i.Input.SQL()
+ return str
+}
+
+func (v Values) SQL() string {
+ str := "VALUES "
+ for j, values := range v {
+ if j > 0 {
+ str += ", "
+ }
+ str += "("
+
+ for k, value := range values {
+ if k > 0 {
+ str += ", "
+ }
+ str += value.SQL()
+ }
+ str += ")"
+ }
+ return str
+}
+
+func (cd ColumnDef) SQL() string {
+ str := cd.Name.SQL() + " " + cd.Type.SQL()
+ if cd.NotNull {
+ str += " NOT NULL"
+ }
+ if cd.Default != nil {
+ str += " DEFAULT (" + cd.Default.SQL() + ")"
+ }
+ if cd.Generated != nil {
+ str += " AS (" + cd.Generated.SQL() + ") STORED"
+ }
+ if cd.Options != (ColumnOptions{}) {
+ str += " " + cd.Options.SQL()
+ }
+ return str
+}
+
+func (tc TableConstraint) SQL() string {
+ var str string
+ if tc.Name != "" {
+ str += "CONSTRAINT " + tc.Name.SQL() + " "
+ }
+ str += tc.Constraint.SQL()
+ return str
+}
+
+func (rdp RowDeletionPolicy) SQL() string {
+ return "ROW DELETION POLICY ( OLDER_THAN ( " + rdp.Column.SQL() + ", INTERVAL " + strconv.FormatInt(rdp.NumDays, 10) + " DAY ))"
+}
+
+func (fk ForeignKey) SQL() string {
+ str := "FOREIGN KEY (" + idList(fk.Columns, ", ")
+ str += ") REFERENCES " + fk.RefTable.SQL() + " ("
+ str += idList(fk.RefColumns, ", ") + ")"
+ str += " ON DELETE " + fk.OnDelete.SQL()
+ return str
+}
+
+func (c Check) SQL() string {
+ return "CHECK (" + c.Expr.SQL() + ")"
+}
+
+// SQL renders the column type. A length suffix is emitted only for
+// STRING and BYTES (with "MAX" for MaxLen), and array types are wrapped
+// in ARRAY<...>.
+func (t Type) SQL() string {
+	str := t.Base.SQL()
+	if t.Len > 0 && (t.Base == String || t.Base == Bytes) {
+		size := "MAX"
+		if t.Len != MaxLen {
+			size = strconv.FormatInt(t.Len, 10)
+		}
+		str += "(" + size + ")"
+	}
+	if t.Array {
+		return "ARRAY<" + str + ">"
+	}
+	return str
+}
+
+func (tb TypeBase) SQL() string {
+ switch tb {
+ case Bool:
+ return "BOOL"
+ case Int64:
+ return "INT64"
+ case Float64:
+ return "FLOAT64"
+ case Numeric:
+ return "NUMERIC"
+ case String:
+ return "STRING"
+ case Bytes:
+ return "BYTES"
+ case Date:
+ return "DATE"
+ case Timestamp:
+ return "TIMESTAMP"
+ case JSON:
+ return "JSON"
+ }
+ panic("unknown TypeBase")
+}
+
+func (pt PrivilegeType) SQL() string {
+ switch pt {
+ case PrivilegeTypeSelect:
+ return "SELECT"
+ case PrivilegeTypeInsert:
+ return "INSERT"
+ case PrivilegeTypeUpdate:
+ return "UPDATE"
+ case PrivilegeTypeDelete:
+ return "DELETE"
+ }
+ panic("unknown PrivilegeType")
+}
+func (kp KeyPart) SQL() string {
+ str := kp.Column.SQL()
+ if kp.Desc {
+ str += " DESC"
+ }
+ return str
+}
+
+func (q Query) SQL() string { return buildSQL(q) }
+func (q Query) addSQL(sb *strings.Builder) {
+ q.Select.addSQL(sb)
+ if len(q.Order) > 0 {
+ sb.WriteString(" ORDER BY ")
+ for i, o := range q.Order {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ o.addSQL(sb)
+ }
+ }
+ if q.Limit != nil {
+ sb.WriteString(" LIMIT ")
+ sb.WriteString(q.Limit.SQL())
+ if q.Offset != nil {
+ sb.WriteString(" OFFSET ")
+ sb.WriteString(q.Offset.SQL())
+ }
+ }
+}
+
+func (sel Select) SQL() string { return buildSQL(sel) }
+func (sel Select) addSQL(sb *strings.Builder) {
+ sb.WriteString("SELECT ")
+ if sel.Distinct {
+ sb.WriteString("DISTINCT ")
+ }
+ for i, e := range sel.List {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ e.addSQL(sb)
+ if len(sel.ListAliases) > 0 {
+ alias := sel.ListAliases[i]
+ if alias != "" {
+ sb.WriteString(" AS ")
+ sb.WriteString(alias.SQL())
+ }
+ }
+ }
+ if len(sel.From) > 0 {
+ sb.WriteString(" FROM ")
+ for i, f := range sel.From {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(f.SQL())
+ }
+ }
+ if sel.Where != nil {
+ sb.WriteString(" WHERE ")
+ sel.Where.addSQL(sb)
+ }
+ if len(sel.GroupBy) > 0 {
+ sb.WriteString(" GROUP BY ")
+ addExprList(sb, sel.GroupBy, ", ")
+ }
+}
+
+func (sft SelectFromTable) SQL() string {
+ str := sft.Table.SQL()
+ if len(sft.Hints) > 0 {
+ str += "@{"
+ kvs := make([]string, len(sft.Hints))
+ i := 0
+ for k, v := range sft.Hints {
+ kvs[i] = fmt.Sprintf("%s=%s", k, v)
+ i++
+ }
+ sort.Strings(kvs)
+ str += strings.Join(kvs, ",")
+ str += "}"
+ }
+
+ if sft.Alias != "" {
+ str += " AS " + sft.Alias.SQL()
+ }
+ return str
+}
+
+func (sfj SelectFromJoin) SQL() string {
+ // TODO: The grammar permits arbitrary nesting. Does this need to add parens?
+ str := sfj.LHS.SQL() + " " + joinTypes[sfj.Type] + " JOIN "
+ // TODO: hints go here
+ str += sfj.RHS.SQL()
+ if sfj.On != nil {
+ str += " ON " + sfj.On.SQL()
+ } else if len(sfj.Using) > 0 {
+ str += " USING (" + idList(sfj.Using, ", ") + ")"
+ }
+ return str
+}
+
+var joinTypes = map[JoinType]string{
+ InnerJoin: "INNER",
+ CrossJoin: "CROSS",
+ FullJoin: "FULL",
+ LeftJoin: "LEFT",
+ RightJoin: "RIGHT",
+}
+
+func (sfu SelectFromUnnest) SQL() string {
+ str := "UNNEST(" + sfu.Expr.SQL() + ")"
+ if sfu.Alias != "" {
+ str += " AS " + sfu.Alias.SQL()
+ }
+ return str
+}
+
+func (o Order) SQL() string { return buildSQL(o) }
+func (o Order) addSQL(sb *strings.Builder) {
+ o.Expr.addSQL(sb)
+ if o.Desc {
+ sb.WriteString(" DESC")
+ }
+}
+
+var arithOps = map[ArithOperator]string{
+ // Binary operators only; unary operators are handled first.
+ Mul: "*",
+ Div: "/",
+ Concat: "||",
+ Add: "+",
+ Sub: "-",
+ BitShl: "<<",
+ BitShr: ">>",
+ BitAnd: "&",
+ BitXor: "^",
+ BitOr: "|",
+}
+
+func (ao ArithOp) SQL() string { return buildSQL(ao) }
+
+// addSQL renders an arithmetic expression. Every operand is wrapped in
+// parentheses to ensure the rendered precedence matches the parse tree.
+func (ao ArithOp) addSQL(sb *strings.Builder) {
+	paren := func(e Expr) {
+		sb.WriteString("(")
+		e.addSQL(sb)
+		sb.WriteString(")")
+	}
+
+	// Unary operators use only the RHS.
+	switch ao.Op {
+	case Neg:
+		sb.WriteString("-")
+		paren(ao.RHS)
+		return
+	case Plus:
+		sb.WriteString("+")
+		paren(ao.RHS)
+		return
+	case BitNot:
+		sb.WriteString("~")
+		paren(ao.RHS)
+		return
+	}
+	op, ok := arithOps[ao.Op]
+	if !ok {
+		panic("unknown ArithOp")
+	}
+	paren(ao.LHS)
+	sb.WriteString(op)
+	paren(ao.RHS)
+}
+
+func (lo LogicalOp) SQL() string { return buildSQL(lo) }
+func (lo LogicalOp) addSQL(sb *strings.Builder) {
+ switch lo.Op {
+ default:
+ panic("unknown LogicalOp")
+ case And:
+ lo.LHS.addSQL(sb)
+ sb.WriteString(" AND ")
+ case Or:
+ lo.LHS.addSQL(sb)
+ sb.WriteString(" OR ")
+ case Not:
+ sb.WriteString("NOT ")
+ }
+ lo.RHS.addSQL(sb)
+}
+
+var compOps = map[ComparisonOperator]string{
+ Lt: "<",
+ Le: "<=",
+ Gt: ">",
+ Ge: ">=",
+ Eq: "=",
+ Ne: "!=",
+ Like: "LIKE",
+ NotLike: "NOT LIKE",
+ Between: "BETWEEN",
+ NotBetween: "NOT BETWEEN",
+}
+
+func (co ComparisonOp) SQL() string { return buildSQL(co) }
+func (co ComparisonOp) addSQL(sb *strings.Builder) {
+ op, ok := compOps[co.Op]
+ if !ok {
+ panic("unknown ComparisonOp")
+ }
+ co.LHS.addSQL(sb)
+ sb.WriteString(" ")
+ sb.WriteString(op)
+ sb.WriteString(" ")
+ co.RHS.addSQL(sb)
+ if co.Op == Between || co.Op == NotBetween {
+ sb.WriteString(" AND ")
+ co.RHS2.addSQL(sb)
+ }
+}
+
+func (io InOp) SQL() string { return buildSQL(io) }
+func (io InOp) addSQL(sb *strings.Builder) {
+ io.LHS.addSQL(sb)
+ if io.Neg {
+ sb.WriteString(" NOT")
+ }
+ sb.WriteString(" IN ")
+ if io.Unnest {
+ sb.WriteString("UNNEST")
+ }
+ sb.WriteString("(")
+ addExprList(sb, io.RHS, ", ")
+ sb.WriteString(")")
+}
+
+func (io IsOp) SQL() string { return buildSQL(io) }
+func (io IsOp) addSQL(sb *strings.Builder) {
+ io.LHS.addSQL(sb)
+ sb.WriteString(" IS ")
+ if io.Neg {
+ sb.WriteString("NOT ")
+ }
+ io.RHS.addSQL(sb)
+}
+
+func (f Func) SQL() string { return buildSQL(f) }
+func (f Func) addSQL(sb *strings.Builder) {
+ sb.WriteString(f.Name)
+ sb.WriteString("(")
+ if f.Distinct {
+ sb.WriteString("DISTINCT ")
+ }
+ addExprList(sb, f.Args, ", ")
+ switch f.NullsHandling {
+ case RespectNulls:
+ sb.WriteString(" RESPECT NULLS")
+ case IgnoreNulls:
+ sb.WriteString(" IGNORE NULLS")
+ }
+ if ah := f.Having; ah != nil {
+ sb.WriteString(" HAVING")
+ switch ah.Condition {
+ case HavingMax:
+ sb.WriteString(" MAX")
+ case HavingMin:
+ sb.WriteString(" MIN")
+ }
+ sb.WriteString(" ")
+ sb.WriteString(ah.Expr.SQL())
+ }
+ sb.WriteString(")")
+}
+
+func (te TypedExpr) SQL() string { return buildSQL(te) }
+func (te TypedExpr) addSQL(sb *strings.Builder) {
+ te.Expr.addSQL(sb)
+ sb.WriteString(" AS ")
+ sb.WriteString(te.Type.SQL())
+}
+
+func (ee ExtractExpr) SQL() string { return buildSQL(ee) }
+func (ee ExtractExpr) addSQL(sb *strings.Builder) {
+ sb.WriteString(ee.Part)
+ sb.WriteString(" FROM ")
+ ee.Expr.addSQL(sb)
+}
+
+func (aze AtTimeZoneExpr) SQL() string { return buildSQL(aze) }
+func (aze AtTimeZoneExpr) addSQL(sb *strings.Builder) {
+ aze.Expr.addSQL(sb)
+ sb.WriteString(" AT TIME ZONE ")
+ sb.WriteString(aze.Zone)
+}
+
+func (ie IntervalExpr) SQL() string { return buildSQL(ie) }
+func (ie IntervalExpr) addSQL(sb *strings.Builder) {
+ sb.WriteString("INTERVAL")
+ sb.WriteString(" ")
+ ie.Expr.addSQL(sb)
+ sb.WriteString(" ")
+ sb.WriteString(ie.DatePart)
+}
+
+func (se SequenceExpr) SQL() string { return buildSQL(se) }
+func (se SequenceExpr) addSQL(sb *strings.Builder) {
+ sb.WriteString("SEQUENCE ")
+ sb.WriteString(se.Name.SQL())
+}
+
+func idList(l []ID, join string) string {
+ var ss []string
+ for _, s := range l {
+ ss = append(ss, s.SQL())
+ }
+ return strings.Join(ss, join)
+}
+
+func addExprList(sb *strings.Builder, l []Expr, join string) {
+ for i, s := range l {
+ if i > 0 {
+ sb.WriteString(join)
+ }
+ s.addSQL(sb)
+ }
+}
+
+func addIDList(sb *strings.Builder, l []ID, join string) {
+ for i, s := range l {
+ if i > 0 {
+ sb.WriteString(join)
+ }
+ s.addSQL(sb)
+ }
+}
+
+func (pe PathExp) SQL() string { return buildSQL(pe) }
+func (pe PathExp) addSQL(sb *strings.Builder) {
+ addIDList(sb, []ID(pe), ".")
+}
+
+func (p Paren) SQL() string { return buildSQL(p) }
+func (p Paren) addSQL(sb *strings.Builder) {
+ sb.WriteString("(")
+ p.Expr.addSQL(sb)
+ sb.WriteString(")")
+}
+
+func (a Array) SQL() string { return buildSQL(a) }
+func (a Array) addSQL(sb *strings.Builder) {
+ sb.WriteString("[")
+ addExprList(sb, []Expr(a), ", ")
+ sb.WriteString("]")
+}
+
+func (id ID) SQL() string { return buildSQL(id) }
+func (id ID) addSQL(sb *strings.Builder) {
+ // https://cloud.google.com/spanner/docs/lexical#identifiers
+
+ // TODO: If there are non-letters/numbers/underscores then this also needs quoting.
+
+ // Naming Convention: Must be enclosed in backticks (`) if it's a reserved keyword or contains a hyphen.
+ if IsKeyword(string(id)) || strings.Contains(string(id), "-") {
+ // TODO: Escaping may be needed here.
+ sb.WriteString("`")
+ sb.WriteString(string(id))
+ sb.WriteString("`")
+ return
+ }
+
+ sb.WriteString(string(id))
+}
+
+func (p Param) SQL() string { return buildSQL(p) }
+func (p Param) addSQL(sb *strings.Builder) {
+ sb.WriteString("@")
+ sb.WriteString(string(p))
+}
+
+func (c Case) SQL() string { return buildSQL(c) }
+func (c Case) addSQL(sb *strings.Builder) {
+ sb.WriteString("CASE ")
+ if c.Expr != nil {
+ fmt.Fprintf(sb, "%s ", c.Expr.SQL())
+ }
+ for _, w := range c.WhenClauses {
+ fmt.Fprintf(sb, "WHEN %s THEN %s ", w.Cond.SQL(), w.Result.SQL())
+ }
+ if c.ElseResult != nil {
+ fmt.Fprintf(sb, "ELSE %s ", c.ElseResult.SQL())
+ }
+ sb.WriteString("END")
+}
+
+func (c Coalesce) SQL() string { return buildSQL(c) }
+func (c Coalesce) addSQL(sb *strings.Builder) {
+ sb.WriteString("COALESCE(")
+ for i, expr := range c.ExprList {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ expr.addSQL(sb)
+ }
+ sb.WriteString(")")
+}
+
+func (i If) SQL() string { return buildSQL(i) }
+func (i If) addSQL(sb *strings.Builder) {
+ sb.WriteString("IF(")
+ i.Expr.addSQL(sb)
+ sb.WriteString(", ")
+ i.TrueResult.addSQL(sb)
+ sb.WriteString(", ")
+ i.ElseResult.addSQL(sb)
+ sb.WriteString(")")
+}
+
+func (in IfNull) SQL() string { return buildSQL(in) }
+func (in IfNull) addSQL(sb *strings.Builder) {
+ sb.WriteString("IFNULL(")
+ in.Expr.addSQL(sb)
+ sb.WriteString(", ")
+ in.NullResult.addSQL(sb)
+ sb.WriteString(")")
+}
+
+func (ni NullIf) SQL() string { return buildSQL(ni) }
+func (ni NullIf) addSQL(sb *strings.Builder) {
+ sb.WriteString("NULLIF(")
+ ni.Expr.addSQL(sb)
+ sb.WriteString(", ")
+ ni.ExprToMatch.addSQL(sb)
+ sb.WriteString(")")
+}
+
+func (b BoolLiteral) SQL() string { return buildSQL(b) }
+func (b BoolLiteral) addSQL(sb *strings.Builder) {
+ if b {
+ sb.WriteString("TRUE")
+ } else {
+ sb.WriteString("FALSE")
+ }
+}
+
+func (NullLiteral) SQL() string { return buildSQL(NullLiteral(0)) }
+func (NullLiteral) addSQL(sb *strings.Builder) { sb.WriteString("NULL") }
+
+func (StarExpr) SQL() string { return buildSQL(StarExpr(0)) }
+func (StarExpr) addSQL(sb *strings.Builder) { sb.WriteString("*") }
+
+func (il IntegerLiteral) SQL() string { return buildSQL(il) }
+func (il IntegerLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "%d", il) }
+
+func (fl FloatLiteral) SQL() string { return buildSQL(fl) }
+func (fl FloatLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "%g", fl) }
+
+// TODO: provide correct string quote method and use it.
+
+func (sl StringLiteral) SQL() string { return buildSQL(sl) }
+func (sl StringLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "%q", sl) }
+
+func (bl BytesLiteral) SQL() string { return buildSQL(bl) }
+func (bl BytesLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "B%q", bl) }
+
+func (dl DateLiteral) SQL() string { return buildSQL(dl) }
+func (dl DateLiteral) addSQL(sb *strings.Builder) {
+ fmt.Fprintf(sb, "DATE '%04d-%02d-%02d'", dl.Year, dl.Month, dl.Day)
+}
+
+func (tl TimestampLiteral) SQL() string { return buildSQL(tl) }
+func (tl TimestampLiteral) addSQL(sb *strings.Builder) {
+ fmt.Fprintf(sb, "TIMESTAMP '%s'", time.Time(tl).Format("2006-01-02 15:04:05.000000-07:00"))
+}
+
+func (jl JSONLiteral) SQL() string { return buildSQL(jl) }
+func (jl JSONLiteral) addSQL(sb *strings.Builder) {
+ fmt.Fprintf(sb, "JSON '%s'", jl)
+}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/types.go b/vendor/cloud.google.com/go/spanner/spansql/types.go
new file mode 100644
index 000000000..481d83f10
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/spansql/types.go
@@ -0,0 +1,1394 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spansql
+
+// This file holds the type definitions for the SQL dialect.
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/civil"
+)
+
+// TODO: More Position fields throughout; maybe in Query/Select.
+
+// CreateTable represents a CREATE TABLE statement.
+// https://cloud.google.com/spanner/docs/data-definition-language#create_table
+type CreateTable struct {
+ Name ID
+ IfNotExists bool
+ Columns []ColumnDef
+ Constraints []TableConstraint
+ PrimaryKey []KeyPart
+ Interleave *Interleave
+ RowDeletionPolicy *RowDeletionPolicy
+ Synonym ID // may be empty
+
+ Position Position // position of the "CREATE" token
+}
+
+func (ct *CreateTable) String() string { return fmt.Sprintf("%#v", ct) }
+func (*CreateTable) isDDLStmt() {}
+func (ct *CreateTable) Pos() Position { return ct.Position }
+func (ct *CreateTable) clearOffset() {
+ for i := range ct.Columns {
+ // Mutate in place.
+ ct.Columns[i].clearOffset()
+ }
+ for i := range ct.Constraints {
+ // Mutate in place.
+ ct.Constraints[i].clearOffset()
+ }
+ ct.Position.Offset = 0
+}
+
+// TableConstraint represents a constraint on a table.
+type TableConstraint struct {
+ Name ID // may be empty
+ Constraint Constraint
+
+ Position Position // position of the "CONSTRAINT" token, or Constraint.Pos()
+}
+
+func (tc TableConstraint) Pos() Position { return tc.Position }
+func (tc *TableConstraint) clearOffset() {
+ switch c := tc.Constraint.(type) {
+ case ForeignKey:
+ c.clearOffset()
+ tc.Constraint = c
+ case Check:
+ c.clearOffset()
+ tc.Constraint = c
+ }
+ tc.Position.Offset = 0
+}
+
+type Constraint interface {
+ isConstraint()
+ SQL() string
+ Node
+}
+
+// Interleave represents an interleave clause of a CREATE TABLE statement.
+type Interleave struct {
+ Parent ID
+ OnDelete OnDelete
+}
+
+// RowDeletionPolicy represents a row deletion policy clause of a CREATE or ALTER TABLE statement.
+type RowDeletionPolicy struct {
+ Column ID
+ NumDays int64
+}
+
+// CreateIndex represents a CREATE INDEX statement.
+// https://cloud.google.com/spanner/docs/data-definition-language#create-index
+type CreateIndex struct {
+ Name ID
+ Table ID
+ Columns []KeyPart
+
+ Unique bool
+ NullFiltered bool
+ IfNotExists bool
+
+ Storing []ID
+ Interleave ID
+
+ Position Position // position of the "CREATE" token
+}
+
+func (ci *CreateIndex) String() string { return fmt.Sprintf("%#v", ci) }
+func (*CreateIndex) isDDLStmt() {}
+func (ci *CreateIndex) Pos() Position { return ci.Position }
+func (ci *CreateIndex) clearOffset() { ci.Position.Offset = 0 }
+
+// CreateView represents a CREATE [OR REPLACE] VIEW statement.
+// https://cloud.google.com/spanner/docs/data-definition-language#view_statements
+type CreateView struct {
+ Name ID
+ OrReplace bool
+ SecurityType SecurityType
+ Query Query
+
+ Position Position // position of the "CREATE" token
+}
+
+func (cv *CreateView) String() string { return fmt.Sprintf("%#v", cv) }
+func (*CreateView) isDDLStmt() {}
+func (cv *CreateView) Pos() Position { return cv.Position }
+func (cv *CreateView) clearOffset() { cv.Position.Offset = 0 }
+
+type SecurityType int
+
+const (
+ Invoker SecurityType = iota
+ Definer
+)
+
+// CreateRole represents a CREATE Role statement.
+// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#create_role
+type CreateRole struct {
+ Name ID
+
+ Position Position // position of the "CREATE" token
+}
+
+func (cr *CreateRole) String() string { return fmt.Sprintf("%#v", cr) }
+func (*CreateRole) isDDLStmt() {}
+func (cr *CreateRole) Pos() Position { return cr.Position }
+func (cr *CreateRole) clearOffset() { cr.Position.Offset = 0 }
+
// DropTable represents a DROP TABLE statement.
// https://cloud.google.com/spanner/docs/data-definition-language#drop_table
type DropTable struct {
	Name     ID
	IfExists bool // whether "IF EXISTS" was specified

	Position Position // position of the "DROP" token
}

func (dt *DropTable) String() string { return fmt.Sprintf("%#v", dt) }
func (*DropTable) isDDLStmt()        {}
func (dt *DropTable) Pos() Position  { return dt.Position }
func (dt *DropTable) clearOffset()   { dt.Position.Offset = 0 }

// DropIndex represents a DROP INDEX statement.
// https://cloud.google.com/spanner/docs/data-definition-language#drop-index
type DropIndex struct {
	Name     ID
	IfExists bool // whether "IF EXISTS" was specified

	Position Position // position of the "DROP" token
}

func (di *DropIndex) String() string { return fmt.Sprintf("%#v", di) }
func (*DropIndex) isDDLStmt()        {}
func (di *DropIndex) Pos() Position  { return di.Position }
func (di *DropIndex) clearOffset()   { di.Position.Offset = 0 }

// DropView represents a DROP VIEW statement.
// https://cloud.google.com/spanner/docs/data-definition-language#drop-view
type DropView struct {
	Name ID

	Position Position // position of the "DROP" token
}

func (dv *DropView) String() string { return fmt.Sprintf("%#v", dv) }
func (*DropView) isDDLStmt()        {}
func (dv *DropView) Pos() Position  { return dv.Position }
func (dv *DropView) clearOffset()   { dv.Position.Offset = 0 }

// DropRole represents a DROP ROLE statement.
// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#drop_role
type DropRole struct {
	Name ID

	Position Position // position of the "DROP" token
}

func (dr *DropRole) String() string { return fmt.Sprintf("%#v", dr) }
func (*DropRole) isDDLStmt()        {}
func (dr *DropRole) Pos() Position  { return dr.Position }
func (dr *DropRole) clearOffset()   { dr.Position.Offset = 0 }
+
// GrantRole represents a GRANT statement.
// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#grant_statement
type GrantRole struct {
	ToRoleNames       []ID        // roles named in the TO ROLE clause
	GrantRoleNames    []ID        // roles being granted (GRANT ROLE ... form)
	Privileges        []Privilege // privileges being granted on schema objects
	TableNames        []ID
	TvfNames          []ID
	ViewNames         []ID
	ChangeStreamNames []ID

	Position Position // position of the "GRANT" token
}

func (gr *GrantRole) String() string { return fmt.Sprintf("%#v", gr) }
func (*GrantRole) isDDLStmt()        {}
func (gr *GrantRole) Pos() Position  { return gr.Position }
func (gr *GrantRole) clearOffset()   { gr.Position.Offset = 0 }

// RevokeRole represents a REVOKE statement.
// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#revoke_statement
type RevokeRole struct {
	FromRoleNames     []ID        // roles named in the FROM ROLE clause
	RevokeRoleNames   []ID        // roles being revoked (REVOKE ROLE ... form)
	Privileges        []Privilege // privileges being revoked from schema objects
	TableNames        []ID
	TvfNames          []ID
	ViewNames         []ID
	ChangeStreamNames []ID
	Position          Position // position of the "REVOKE" token
}

func (rr *RevokeRole) String() string { return fmt.Sprintf("%#v", rr) }
func (*RevokeRole) isDDLStmt()        {}
func (rr *RevokeRole) Pos() Position  { return rr.Position }
func (rr *RevokeRole) clearOffset()   { rr.Position.Offset = 0 }

// Privilege represents privilege to grant or revoke.
type Privilege struct {
	Type    PrivilegeType
	Columns []ID // columns the privilege is restricted to; empty for the whole object
}
+
// AlterTable represents an ALTER TABLE statement.
// https://cloud.google.com/spanner/docs/data-definition-language#alter_table
type AlterTable struct {
	Name       ID
	Alteration TableAlteration

	Position Position // position of the "ALTER" token
}

func (at *AlterTable) String() string { return fmt.Sprintf("%#v", at) }
func (*AlterTable) isDDLStmt()        {}
func (at *AlterTable) Pos() Position  { return at.Position }
func (at *AlterTable) clearOffset() {
	// AddColumn and AddConstraint embed definitions carrying their own
	// positions. The alterations are stored as value types in an interface,
	// so clear the offset on a copy and store the copy back.
	switch alt := at.Alteration.(type) {
	case AddColumn:
		alt.Def.clearOffset()
		at.Alteration = alt
	case AddConstraint:
		alt.Constraint.clearOffset()
		at.Alteration = alt
	}
	at.Position.Offset = 0
}

// TableAlteration is satisfied by AddColumn, DropColumn, AddConstraint,
// DropConstraint, SetOnDelete, AlterColumn,
// AddRowDeletionPolicy, ReplaceRowDeletionPolicy, DropRowDeletionPolicy,
// RenameTo, AddSynonym, and DropSynonym.
type TableAlteration interface {
	isTableAlteration()
	SQL() string
}

func (AddColumn) isTableAlteration()                {}
func (DropColumn) isTableAlteration()               {}
func (AddConstraint) isTableAlteration()            {}
func (DropConstraint) isTableAlteration()           {}
func (SetOnDelete) isTableAlteration()              {}
func (AlterColumn) isTableAlteration()              {}
func (AddRowDeletionPolicy) isTableAlteration()     {}
func (ReplaceRowDeletionPolicy) isTableAlteration() {}
func (DropRowDeletionPolicy) isTableAlteration()    {}
func (RenameTo) isTableAlteration()                 {}
func (AddSynonym) isTableAlteration()               {}
func (DropSynonym) isTableAlteration()              {}

type (
	// AddColumn is an ADD COLUMN table alteration.
	AddColumn struct {
		IfNotExists bool // whether "IF NOT EXISTS" was specified
		Def         ColumnDef
	}
	// DropColumn is a DROP COLUMN table alteration.
	DropColumn struct{ Name ID }
	// AddConstraint is an ADD constraint table alteration.
	AddConstraint struct{ Constraint TableConstraint }
	// DropConstraint is a DROP CONSTRAINT table alteration.
	DropConstraint struct{ Name ID }
	// SetOnDelete is a SET ON DELETE table alteration.
	SetOnDelete struct{ Action OnDelete }
	// AlterColumn is an ALTER COLUMN table alteration.
	AlterColumn struct {
		Name       ID
		Alteration ColumnAlteration
	}
)
+
// Row deletion policy table alterations.
type (
	// AddRowDeletionPolicy is an ADD ROW DELETION POLICY table alteration.
	AddRowDeletionPolicy struct{ RowDeletionPolicy RowDeletionPolicy }
	// ReplaceRowDeletionPolicy is a REPLACE ROW DELETION POLICY table alteration.
	ReplaceRowDeletionPolicy struct{ RowDeletionPolicy RowDeletionPolicy }
	// DropRowDeletionPolicy is a DROP ROW DELETION POLICY table alteration.
	DropRowDeletionPolicy struct{}
)

// ColumnAlteration is satisfied by SetColumnType, SetColumnOptions,
// SetDefault and DropDefault.
type ColumnAlteration interface {
	isColumnAlteration()
	SQL() string
}

func (SetColumnType) isColumnAlteration()    {}
func (SetColumnOptions) isColumnAlteration() {}
func (SetDefault) isColumnAlteration()       {}
func (DropDefault) isColumnAlteration()      {}

// SetColumnType changes a column's type and NOT NULL/DEFAULT attributes.
type SetColumnType struct {
	Type    Type
	NotNull bool
	Default Expr
}

// SetColumnOptions sets a column's OPTIONS.
type SetColumnOptions struct{ Options ColumnOptions }

// SetDefault sets a column's default value.
type SetDefault struct {
	Default Expr
}

// DropDefault removes a column's default value.
type DropDefault struct{}

// OnDelete represents an ON DELETE action.
type OnDelete int

const (
	NoActionOnDelete OnDelete = iota // ON DELETE NO ACTION
	CascadeOnDelete                  // ON DELETE CASCADE
)

// Renaming table alterations.
type (
	// RenameTo is a RENAME TO table alteration.
	RenameTo struct {
		ToName  ID
		Synonym ID // may be empty
	}
	// AddSynonym is an ADD SYNONYM table alteration.
	AddSynonym struct{ Name ID }
	// DropSynonym is a DROP SYNONYM table alteration.
	DropSynonym struct{ Name ID }
)
+
// RenameTable represents a RENAME TABLE statement.
type RenameTable struct {
	TableRenameOps []TableRenameOp

	Position Position // position of the "RENAME" token
}

// TableRenameOp is a single old-name/new-name pair within a RENAME TABLE statement.
type TableRenameOp struct {
	FromName ID
	ToName   ID
}

func (rt *RenameTable) String() string { return fmt.Sprintf("%#v", rt) }
func (*RenameTable) isDDLStmt()        {}
func (rt *RenameTable) Pos() Position  { return rt.Position }
func (rt *RenameTable) clearOffset()   { rt.Position.Offset = 0 }

// AlterDatabase represents an ALTER DATABASE statement.
// https://cloud.google.com/spanner/docs/data-definition-language#alter-database
type AlterDatabase struct {
	Name       ID
	Alteration DatabaseAlteration

	Position Position // position of the "ALTER" token
}

func (ad *AlterDatabase) String() string { return fmt.Sprintf("%#v", ad) }
func (*AlterDatabase) isDDLStmt()        {}
func (ad *AlterDatabase) Pos() Position  { return ad.Position }
func (ad *AlterDatabase) clearOffset()   { ad.Position.Offset = 0 }

// DatabaseAlteration is satisfied by SetDatabaseOptions.
type DatabaseAlteration interface {
	isDatabaseAlteration()
	SQL() string
}

// SetDatabaseOptions is a SET OPTIONS database alteration.
type SetDatabaseOptions struct{ Options DatabaseOptions }

func (SetDatabaseOptions) isDatabaseAlteration() {}

// DatabaseOptions represents options on a database as part of a
// ALTER DATABASE statement.
// Each field is nil when the corresponding option is not present.
type DatabaseOptions struct {
	OptimizerVersion           *int
	OptimizerStatisticsPackage *string
	VersionRetentionPeriod     *string
	EnableKeyVisualizer        *bool
	DefaultLeader              *string
}
+
// Delete represents a DELETE statement.
// https://cloud.google.com/spanner/docs/dml-syntax#delete-statement
type Delete struct {
	Table ID
	Where BoolExpr

	// TODO: Alias
}

func (d *Delete) String() string { return fmt.Sprintf("%#v", d) }
func (*Delete) isDMLStmt()       {}

// Insert represents an INSERT statement.
// https://cloud.google.com/spanner/docs/dml-syntax#insert-statement
type Insert struct {
	Table   ID
	Columns []ID
	Input   ValuesOrSelect // the VALUES lists or SELECT subquery providing rows
}

// Values represents one or more lists of expressions passed to an `INSERT` statement.
type Values [][]Expr

func (v Values) isValuesOrSelect() {}
func (v Values) String() string    { return fmt.Sprintf("%#v", v) }

// ValuesOrSelect is satisfied by Values and Select, the two possible
// row sources of an INSERT statement.
type ValuesOrSelect interface {
	isValuesOrSelect()
	SQL() string
}

func (Select) isValuesOrSelect() {}

func (i *Insert) String() string { return fmt.Sprintf("%#v", i) }
func (*Insert) isDMLStmt()       {}

// Update represents an UPDATE statement.
// https://cloud.google.com/spanner/docs/dml-syntax#update-statement
type Update struct {
	Table ID
	Items []UpdateItem
	Where BoolExpr

	// TODO: Alias
}

func (u *Update) String() string { return fmt.Sprintf("%#v", u) }
func (*Update) isDMLStmt()       {}

// UpdateItem is a single column assignment within an UPDATE statement.
type UpdateItem struct {
	Column ID
	Value  Expr // or nil for DEFAULT
}
+
// ColumnDef represents a column definition as part of a CREATE TABLE
// or ALTER TABLE statement.
type ColumnDef struct {
	Name    ID
	Type    Type
	NotNull bool

	Default   Expr // set if this column has a default value
	Generated Expr // set if this is a generated column

	Options ColumnOptions

	Position Position // position of the column name
}

func (cd ColumnDef) Pos() Position  { return cd.Position }
func (cd *ColumnDef) clearOffset()  { cd.Position.Offset = 0 }

// ColumnOptions represents options on a column as part of a
// CREATE TABLE or ALTER TABLE statement.
type ColumnOptions struct {
	// AllowCommitTimestamp represents a column OPTIONS.
	// `true` if query is `OPTIONS (allow_commit_timestamp = true)`
	// `false` if query is `OPTIONS (allow_commit_timestamp = null)`
	// `nil` if there are no OPTIONS
	AllowCommitTimestamp *bool
}

// ForeignKey represents a foreign key definition as part of a CREATE TABLE
// or ALTER TABLE statement.
type ForeignKey struct {
	Columns    []ID
	RefTable   ID
	RefColumns []ID
	OnDelete   OnDelete

	Position Position // position of the "FOREIGN" token
}

func (fk ForeignKey) Pos() Position { return fk.Position }
func (fk *ForeignKey) clearOffset() { fk.Position.Offset = 0 }
func (ForeignKey) isConstraint()    {}

// Check represents a check constraint as part of a CREATE TABLE
// or ALTER TABLE statement.
type Check struct {
	Expr BoolExpr

	Position Position // position of the "CHECK" token
}

func (c Check) Pos() Position  { return c.Position }
func (c *Check) clearOffset()  { c.Position.Offset = 0 }
func (Check) isConstraint()    {}
+
// Type represents a column type.
type Type struct {
	Array bool
	Base  TypeBase // Bool, Int64, Float64, Numeric, String, Bytes, Date, Timestamp, JSON
	Len   int64    // if Base is String or Bytes; may be MaxLen
}

// MaxLen is a sentinel for Type's Len field, representing the MAX value.
const MaxLen = math.MaxInt64

// TypeBase is the base (scalar) type of a column.
type TypeBase int

// Base column types.
const (
	Bool TypeBase = iota
	Int64
	Float64
	Numeric
	String
	Bytes
	Date
	Timestamp
	JSON
)

// PrivilegeType is the kind of privilege named in a GRANT or REVOKE statement.
type PrivilegeType int

// Privilege kinds.
const (
	PrivilegeTypeSelect PrivilegeType = iota
	PrivilegeTypeInsert
	PrivilegeTypeUpdate
	PrivilegeTypeDelete
)

// KeyPart represents a column specification as part of a primary key or index definition.
type KeyPart struct {
	Column ID
	Desc   bool // whether the column is sorted descending ("DESC")
}
+
// Query represents a query statement.
// https://cloud.google.com/spanner/docs/query-syntax#sql-syntax
type Query struct {
	Select Select
	Order  []Order // ORDER BY clause; empty if none

	Limit, Offset LiteralOrParam // LIMIT/OFFSET clauses; nil when absent
}

// Select represents a SELECT statement.
// https://cloud.google.com/spanner/docs/query-syntax#select-list
type Select struct {
	Distinct bool
	List     []Expr
	From     []SelectFrom
	Where    BoolExpr
	GroupBy  []Expr
	// TODO: Having

	// When the FROM clause has TABLESAMPLE operators,
	// TableSamples will be populated 1:1 with From;
	// FROM clauses without will have a nil value.
	TableSamples []*TableSample

	// If the SELECT list has explicit aliases ("AS alias"),
	// ListAliases will be populated 1:1 with List;
	// aliases that are present will be non-empty.
	ListAliases []ID
}

// SelectFrom represents the FROM clause of a SELECT.
// https://cloud.google.com/spanner/docs/query-syntax#from_clause
type SelectFrom interface {
	isSelectFrom()
	SQL() string
}

// SelectFromTable is a SelectFrom that specifies a table to read from.
type SelectFromTable struct {
	Table ID
	Alias ID // empty if not aliased
	Hints map[string]string
}

func (SelectFromTable) isSelectFrom() {}

// SelectFromJoin is a SelectFrom that joins two other SelectFroms.
// https://cloud.google.com/spanner/docs/query-syntax#join_types
type SelectFromJoin struct {
	Type     JoinType
	LHS, RHS SelectFrom

	// Join condition.
	// At most one of {On,Using} may be set.
	On    BoolExpr
	Using []ID

	// Hints are suggestions for how to evaluate a join.
	// https://cloud.google.com/spanner/docs/query-syntax#join-hints
	Hints map[string]string
}

func (SelectFromJoin) isSelectFrom() {}

// JoinType is the type of a join (INNER, CROSS, FULL, LEFT, RIGHT).
type JoinType int

// Join types.
const (
	InnerJoin JoinType = iota
	CrossJoin
	FullJoin
	LeftJoin
	RightJoin
)

// SelectFromUnnest is a SelectFrom that yields a virtual table from an array.
// https://cloud.google.com/spanner/docs/query-syntax#unnest
type SelectFromUnnest struct {
	Expr  Expr
	Alias ID // empty if not aliased

	// TODO: Implicit
}

func (SelectFromUnnest) isSelectFrom() {}

// TODO: SelectFromSubquery, etc.
+
// Order is a single term of an ORDER BY clause.
type Order struct {
	Expr Expr
	Desc bool // whether the ordering is descending ("DESC")
}

// TableSample represents a TABLESAMPLE operator applied to a FROM clause.
type TableSample struct {
	Method   TableSampleMethod
	Size     Expr
	SizeType TableSampleSizeType
}

// TableSampleMethod is the sampling method of a TABLESAMPLE operator.
type TableSampleMethod int

// Sampling methods.
const (
	Bernoulli TableSampleMethod = iota
	Reservoir
)

// TableSampleSizeType says whether a TABLESAMPLE size is a percentage or a row count.
type TableSampleSizeType int

// Sample size types.
const (
	PercentTableSample TableSampleSizeType = iota
	RowsTableSample
)

// BoolExpr is an Expr that is known to yield a boolean value.
type BoolExpr interface {
	isBoolExpr()
	Expr
}

// Expr is satisfied by every expression node.
type Expr interface {
	isExpr()
	SQL() string
	addSQL(*strings.Builder)
}

// LiteralOrParam is implemented by integer literal and parameter values.
type LiteralOrParam interface {
	isLiteralOrParam()
	SQL() string
}

// ArithOp is a unary or binary arithmetic/bitwise expression.
type ArithOp struct {
	Op       ArithOperator
	LHS, RHS Expr // only RHS is set for Neg, Plus, BitNot
}

func (ArithOp) isExpr() {}

// ArithOperator is the operator of an ArithOp.
type ArithOperator int

// Arithmetic and bitwise operators.
const (
	Neg    ArithOperator = iota // unary -
	Plus                        // unary +
	BitNot                      // unary ~
	Mul                         // *
	Div                         // /
	Concat                      // ||
	Add                         // +
	Sub                         // -
	BitShl                      // <<
	BitShr                      // >>
	BitAnd                      // &
	BitXor                      // ^
	BitOr                       // |
)
+
// LogicalOp is a boolean expression combined with AND/OR/NOT.
type LogicalOp struct {
	Op       LogicalOperator
	LHS, RHS BoolExpr // only RHS is set for Not
}

func (LogicalOp) isBoolExpr() {}
func (LogicalOp) isExpr()     {}

// LogicalOperator is the operator of a LogicalOp.
type LogicalOperator int

// Logical operators.
const (
	And LogicalOperator = iota
	Or
	Not
)

// ComparisonOp is a comparison expression (e.g. =, <, LIKE, BETWEEN).
type ComparisonOp struct {
	Op       ComparisonOperator
	LHS, RHS Expr

	// RHS2 is the third operand for BETWEEN.
	// "<LHS> BETWEEN <RHS> AND <RHS2>".
	RHS2 Expr
}

func (ComparisonOp) isBoolExpr() {}
func (ComparisonOp) isExpr()     {}

// ComparisonOperator is the operator of a ComparisonOp.
type ComparisonOperator int

// Comparison operators.
const (
	Lt ComparisonOperator = iota
	Le
	Gt
	Ge
	Eq
	Ne // both "!=" and "<>"
	Like
	NotLike
	Between
	NotBetween
)

// InOp is an IN/NOT IN expression, optionally over UNNEST.
type InOp struct {
	LHS    Expr
	Neg    bool // true for NOT IN
	RHS    []Expr
	Unnest bool // true for the "IN UNNEST(...)" form

	// TODO: support subquery form
}

func (InOp) isBoolExpr() {} // usually
func (InOp) isExpr()     {}

// IsOp is an IS/IS NOT expression.
type IsOp struct {
	LHS Expr
	Neg bool // true for IS NOT
	RHS IsExpr
}

func (IsOp) isBoolExpr() {}
func (IsOp) isExpr()     {}

// IsExpr is an Expr permitted on the right-hand side of IS/IS NOT.
type IsExpr interface {
	isIsExpr()
	Expr
}
+
// PathExp represents a path expression.
//
// The grammar for path expressions is not defined (see b/169017423 internally),
// so this captures the most common form only, namely a dotted sequence of identifiers.
type PathExp []ID

func (PathExp) isExpr() {}

// Func represents a function call.
type Func struct {
	Name string // not ID
	Args []Expr

	Distinct      bool // whether "DISTINCT" was specified on the arguments
	NullsHandling NullsHandling
	Having        *AggregateHaving // nil if no HAVING MAX/MIN clause
}

func (Func) isBoolExpr() {} // possibly bool
func (Func) isExpr()     {}

// TypedExpr represents a typed expression in the form `expr AS type_name`, e.g. `'17' AS INT64`.
type TypedExpr struct {
	Type Type
	Expr Expr
}

func (TypedExpr) isBoolExpr() {} // possibly bool
func (TypedExpr) isExpr()     {}

// ExtractExpr represents an EXTRACT(part FROM expr) expression.
type ExtractExpr struct {
	Part string
	Type Type
	Expr Expr
}

func (ExtractExpr) isBoolExpr() {} // possibly bool
func (ExtractExpr) isExpr()     {}

// AtTimeZoneExpr represents an "expr AT TIME ZONE zone" expression.
type AtTimeZoneExpr struct {
	Expr Expr
	Type Type
	Zone string
}

func (AtTimeZoneExpr) isBoolExpr() {} // possibly bool
func (AtTimeZoneExpr) isExpr()     {}

// IntervalExpr represents an "INTERVAL expr date_part" expression.
type IntervalExpr struct {
	Expr     Expr
	DatePart string
}

func (IntervalExpr) isBoolExpr() {} // possibly bool
func (IntervalExpr) isExpr()     {}

// SequenceExpr represents a sequence reference, e.g. in GET_NEXT_SEQUENCE_VALUE.
type SequenceExpr struct {
	Name ID
}

func (SequenceExpr) isExpr() {}

// NullsHandling represents the method of dealing with NULL values in aggregate functions.
type NullsHandling int

// NULL handling modes for aggregate function arguments.
const (
	NullsHandlingUnspecified NullsHandling = iota
	RespectNulls
	IgnoreNulls
)

// AggregateHaving represents the HAVING clause specific to aggregate functions, restricting rows based on a maximal or minimal value.
type AggregateHaving struct {
	Condition AggregateHavingCondition
	Expr      Expr
}

// AggregateHavingCondition represents the condition (MAX or MIN) for the AggregateHaving clause.
type AggregateHavingCondition int

// AggregateHaving conditions.
const (
	HavingMax AggregateHavingCondition = iota
	HavingMin
)
+
// Paren represents a parenthesised expression.
type Paren struct {
	Expr Expr
}

func (Paren) isBoolExpr() {} // possibly bool
func (Paren) isExpr()     {}

// Array represents an array literal.
type Array []Expr

func (Array) isExpr() {}

// ID represents an identifier.
// https://cloud.google.com/spanner/docs/lexical#identifiers
type ID string

func (ID) isBoolExpr() {} // possibly bool
func (ID) isExpr()     {}

// Param represents a query parameter.
type Param string

func (Param) isBoolExpr()       {} // possibly bool
func (Param) isExpr()           {}
func (Param) isLiteralOrParam() {}

// Case represents a CASE expression.
type Case struct {
	Expr        Expr // operand being compared; nil for the searched-CASE form
	WhenClauses []WhenClause
	ElseResult  Expr // nil if no ELSE clause
}

func (Case) isBoolExpr() {} // possibly bool
func (Case) isExpr()     {}

// WhenClause is a single WHEN/THEN pair of a CASE expression.
type WhenClause struct {
	Cond   Expr
	Result Expr
}
+
// Coalesce represents a COALESCE expression.
type Coalesce struct {
	ExprList []Expr
}

func (Coalesce) isBoolExpr() {} // possibly bool
func (Coalesce) isExpr()     {}

// If represents an IF(expr, true_result, else_result) expression.
type If struct {
	Expr       Expr
	TrueResult Expr
	ElseResult Expr
}

func (If) isBoolExpr() {} // possibly bool
func (If) isExpr()     {}

// IfNull represents an IFNULL(expr, null_result) expression.
type IfNull struct {
	Expr       Expr
	NullResult Expr
}

func (IfNull) isBoolExpr() {} // possibly bool
func (IfNull) isExpr()     {}

// NullIf represents a NULLIF(expr, expr_to_match) expression.
type NullIf struct {
	Expr        Expr
	ExprToMatch Expr
}

func (NullIf) isBoolExpr() {} // possibly bool
func (NullIf) isExpr()     {}

// BoolLiteral represents a TRUE or FALSE literal.
type BoolLiteral bool

// Boolean literal values.
const (
	True  = BoolLiteral(true)
	False = BoolLiteral(false)
)

func (BoolLiteral) isBoolExpr() {}
func (BoolLiteral) isIsExpr()   {}
func (BoolLiteral) isExpr()     {}

// NullLiteral represents a NULL literal.
type NullLiteral int

// Null is the NULL literal.
const Null = NullLiteral(0)

func (NullLiteral) isIsExpr() {}
func (NullLiteral) isExpr()   {}

// IntegerLiteral represents an integer literal.
// https://cloud.google.com/spanner/docs/lexical#integer-literals
type IntegerLiteral int64

func (IntegerLiteral) isLiteralOrParam() {}
func (IntegerLiteral) isExpr()           {}

// FloatLiteral represents a floating point literal.
// https://cloud.google.com/spanner/docs/lexical#floating-point-literals
type FloatLiteral float64

func (FloatLiteral) isExpr() {}

// StringLiteral represents a string literal.
// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
type StringLiteral string

func (StringLiteral) isExpr() {}

// BytesLiteral represents a bytes literal.
// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
type BytesLiteral string

func (BytesLiteral) isExpr() {}

// DateLiteral represents a date literal.
// https://cloud.google.com/spanner/docs/lexical#date_literals
type DateLiteral civil.Date

func (DateLiteral) isExpr() {}

// TimestampLiteral represents a timestamp literal.
// https://cloud.google.com/spanner/docs/lexical#timestamp_literals
type TimestampLiteral time.Time

func (TimestampLiteral) isExpr() {}

// JSONLiteral represents a JSON literal
// https://cloud.google.com/spanner/docs/reference/standard-sql/lexical#json_literals
type JSONLiteral []byte

func (JSONLiteral) isExpr() {}

// StarExpr is the type of the Star sentinel.
type StarExpr int

// Star represents a "*" in an expression.
const Star = StarExpr(0)

func (StarExpr) isExpr() {}
+
// statements is the common interface of DDL and DML files,
// used by the shared comment-lookup helpers.
type statements interface {
	setFilename(string)
	getComments() []*Comment
	addComment(*Comment)
}

// DDL
// https://cloud.google.com/spanner/docs/data-definition-language#ddl_syntax

// DDL represents a Data Definition Language (DDL) file.
type DDL struct {
	List []DDLStmt

	Filename string // if known at parse time

	Comments []*Comment // all comments, sorted by position
}

// clearOffset zeroes the byte offsets of all statements and comments,
// leaving only line numbers as position information.
func (d *DDL) clearOffset() {
	for _, stmt := range d.List {
		stmt.clearOffset()
	}
	for _, c := range d.Comments {
		c.clearOffset()
	}
}

func (d *DDL) setFilename(filename string) {
	d.Filename = filename
}

func (d *DDL) addComment(comment *Comment) {
	d.Comments = append(d.Comments, comment)
}

func (d *DDL) getComments() []*Comment {
	return d.Comments
}
+
// DML
// https://cloud.google.com/spanner/docs/reference/standard-sql/dml-syntax

// DML represents a Data Manipulation Language (DML) file.
type DML struct {
	List []DMLStmt

	Filename string // if known at parse time

	Comments []*Comment // all comments, sorted by position
}

// clearOffset zeroes the byte offsets of all comments.
// NOTE: unlike DDL.clearOffset, statement offsets are untouched here;
// the DMLStmt interface does not expose clearOffset.
func (d *DML) clearOffset() {
	for _, c := range d.Comments {
		c.clearOffset()
	}
}

func (d *DML) setFilename(filename string) {
	d.Filename = filename
}

func (d *DML) addComment(comment *Comment) {
	d.Comments = append(d.Comments, comment)
}

func (d *DML) getComments() []*Comment {
	return d.Comments
}

// DDLStmt is satisfied by a type that can appear in a DDL.
type DDLStmt interface {
	isDDLStmt()
	clearOffset()
	SQL() string
	Node
}

// DMLStmt is satisfied by a type that is a DML statement.
type DMLStmt interface {
	isDMLStmt()
	SQL() string
}
+
// Comment represents a comment.
type Comment struct {
	Marker   string // Opening marker; one of "#", "--", "/*".
	Isolated bool   // Whether this comment is on its own line.
	// Start and End are the position of the opening and terminating marker.
	Start, End Position
	Text       []string // comment content, one element per line
}

func (c *Comment) String() string { return fmt.Sprintf("%#v", c) }
func (c *Comment) Pos() Position  { return c.Start }
func (c *Comment) clearOffset()   { c.Start.Offset, c.End.Offset = 0, 0 }

// Node is implemented by concrete types in this package that represent things
// appearing in a DDL file.
type Node interface {
	Pos() Position
	// clearOffset() is not included here because some types like ColumnDef
	// have the method on their pointer type rather than their natural value type.
	// This method is only invoked from within this package, so it isn't
	// important to enforce such things.
}
+
// Position describes a source position in an input DDL file.
// It is only valid if the line number is positive.
type Position struct {
	Line   int // 1-based line number
	Offset int // 0-based byte offset
}

// IsValid reports whether pos refers to a real source location.
func (pos Position) IsValid() bool { return pos.Line > 0 }

// String returns the position as ":<line>", or ":<invalid>" when the
// position is not valid. It delegates to IsValid so that negative line
// numbers — invalid per IsValid — also render as ":<invalid>" instead of
// being formatted verbatim (previously only Line == 0 was special-cased).
func (pos Position) String() string {
	if !pos.IsValid() {
		return ":<invalid>"
	}
	return fmt.Sprintf(":%d", pos.Line)
}
+
// LeadingComment returns the comment that immediately precedes a node,
// or nil if there's no such comment.
func (d *DDL) LeadingComment(n Node) *Comment {
	return getLeadingComment(d, n)
}

// InlineComment returns the comment on the same line as a node,
// or nil if there's no inline comment.
// The returned comment is guaranteed to be a single line.
func (d *DDL) InlineComment(n Node) *Comment {
	return getInlineComment(d, n)
}

// LeadingComment returns the comment that immediately precedes a node,
// or nil if there's no such comment.
func (d *DML) LeadingComment(n Node) *Comment {
	return getLeadingComment(d, n)
}

// InlineComment returns the comment on the same line as a node,
// or nil if there's no inline comment.
// The returned comment is guaranteed to be a single line.
func (d *DML) InlineComment(n Node) *Comment {
	return getInlineComment(d, n)
}
+
+func getLeadingComment(stmts statements, n Node) *Comment {
+ // Get the comment whose End position is on the previous line.
+ lineEnd := n.Pos().Line - 1
+ comments := stmts.getComments()
+ ci := sort.Search(len(comments), func(i int) bool {
+ return comments[i].End.Line >= lineEnd
+ })
+ if ci >= len(comments) || comments[ci].End.Line != lineEnd {
+ return nil
+ }
+ if !comments[ci].Isolated {
+ // This is an inline comment for a previous node.
+ return nil
+ }
+ return comments[ci]
+}
+
+func getInlineComment(stmts statements, n Node) *Comment {
+ // TODO: Do we care about comments like this?
+ // string name = 1; /* foo
+ // bar */
+
+ pos := n.Pos()
+ comments := stmts.getComments()
+ ci := sort.Search(len(comments), func(i int) bool {
+ return comments[i].Start.Line >= pos.Line
+ })
+ if ci >= len(comments) {
+ return nil
+ }
+ c := comments[ci]
+ if c.Start.Line != pos.Line {
+ return nil
+ }
+ if c.Start.Line != c.End.Line || len(c.Text) != 1 {
+ // Multi-line comment; don't return it.
+ return nil
+ }
+ return c
+}
+
// CreateChangeStream represents a CREATE CHANGE STREAM statement.
// https://cloud.google.com/spanner/docs/change-streams/manage
type CreateChangeStream struct {
	Name           ID
	Watch          []WatchDef // watched tables/columns; empty when WatchAllTables is set
	WatchAllTables bool       // whether "FOR ALL" was specified
	Options        ChangeStreamOptions

	Position Position
}

func (cs *CreateChangeStream) String() string { return fmt.Sprintf("%#v", cs) }
func (*CreateChangeStream) isDDLStmt()        {}
func (cs *CreateChangeStream) Pos() Position  { return cs.Position }
func (cs *CreateChangeStream) clearOffset() {
	for i := range cs.Watch {
		// Mutate in place.
		cs.Watch[i].clearOffset()
	}
	cs.Position.Offset = 0
}

// AlterChangeStream represents an ALTER CHANGE STREAM statement.
type AlterChangeStream struct {
	Name       ID
	Alteration ChangeStreamAlteration

	Position Position
}

func (acs *AlterChangeStream) String() string { return fmt.Sprintf("%#v", acs) }
func (*AlterChangeStream) isDDLStmt()         {}
func (acs *AlterChangeStream) Pos() Position  { return acs.Position }
func (acs *AlterChangeStream) clearOffset() {
	acs.Position.Offset = 0
}

// ChangeStreamAlteration is satisfied by AlterWatch, DropChangeStreamWatch,
// and AlterChangeStreamOptions.
type ChangeStreamAlteration interface {
	isChangeStreamAlteration()
	SQL() string
}

func (AlterWatch) isChangeStreamAlteration()               {}
func (DropChangeStreamWatch) isChangeStreamAlteration()    {}
func (AlterChangeStreamOptions) isChangeStreamAlteration() {}

type (
	// AlterWatch changes the set of tables/columns watched by a change stream.
	AlterWatch struct {
		WatchAllTables bool
		Watch          []WatchDef
	}
	// DropChangeStreamWatch removes the watched set from a change stream.
	DropChangeStreamWatch struct{}
	// AlterChangeStreamOptions sets a change stream's OPTIONS.
	AlterChangeStreamOptions struct{ Options ChangeStreamOptions }
)
+
// DropChangeStream represents a DROP CHANGE STREAM statement.
type DropChangeStream struct {
	Name ID

	Position Position
}

func (dc *DropChangeStream) String() string { return fmt.Sprintf("%#v", dc) }
func (*DropChangeStream) isDDLStmt()        {}
func (dc *DropChangeStream) Pos() Position  { return dc.Position }
func (dc *DropChangeStream) clearOffset()   { dc.Position.Offset = 0 }

// WatchDef describes one table watched by a change stream.
type WatchDef struct {
	Table        ID
	Columns      []ID // explicit columns; empty when WatchAllCols is set
	WatchAllCols bool

	Position Position
}

func (wd WatchDef) Pos() Position { return wd.Position }
func (wd *WatchDef) clearOffset() { wd.Position.Offset = 0 }

// ChangeStreamOptions represents OPTIONS on a change stream.
// Each field is nil when the corresponding option is not present.
type ChangeStreamOptions struct {
	RetentionPeriod  *string
	ValueCaptureType *string
}

// AlterStatistics represents an ALTER STATISTICS statement.
// https://cloud.google.com/spanner/docs/data-definition-language#alter-statistics
type AlterStatistics struct {
	Name       ID
	Alteration StatisticsAlteration

	Position Position // position of the "ALTER" token
}

func (as *AlterStatistics) String() string { return fmt.Sprintf("%#v", as) }
func (*AlterStatistics) isDDLStmt()        {}
func (as *AlterStatistics) Pos() Position  { return as.Position }
func (as *AlterStatistics) clearOffset()   { as.Position.Offset = 0 }

// StatisticsAlteration is satisfied by SetStatisticsOptions.
type StatisticsAlteration interface {
	isStatisticsAlteration()
	SQL() string
}

// SetStatisticsOptions is a SET OPTIONS statistics alteration.
type SetStatisticsOptions struct{ Options StatisticsOptions }

func (SetStatisticsOptions) isStatisticsAlteration() {}

// StatisticsOptions represents options on a statistics as part of an ALTER STATISTICS statement.
type StatisticsOptions struct {
	AllowGC *bool // nil when the allow_gc option is not present
}
+
// AlterIndex represents an ALTER INDEX statement.
type AlterIndex struct {
	Name       ID
	Alteration IndexAlteration

	Position Position // position of the "ALTER" token
}

func (as *AlterIndex) String() string { return fmt.Sprintf("%#v", as) }
func (*AlterIndex) isDDLStmt()        {}
func (as *AlterIndex) Pos() Position  { return as.Position }
func (as *AlterIndex) clearOffset()   { as.Position.Offset = 0 }

// IndexAlteration is satisfied by AddStoredColumn and DropStoredColumn.
type IndexAlteration interface {
	isIndexAlteration()
	SQL() string
}

func (AddStoredColumn) isIndexAlteration()  {}
func (DropStoredColumn) isIndexAlteration() {}

type (
	// AddStoredColumn is an ADD STORED COLUMN index alteration.
	AddStoredColumn struct{ Name ID }
	// DropStoredColumn is a DROP STORED COLUMN index alteration.
	DropStoredColumn struct{ Name ID }
)

// CreateSequence represents a CREATE SEQUENCE statement.
// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#create-sequence
type CreateSequence struct {
	Name        ID
	IfNotExists bool // whether "IF NOT EXISTS" was specified
	Options     SequenceOptions

	Position Position
}

func (cs *CreateSequence) String() string { return fmt.Sprintf("%#v", cs) }
func (*CreateSequence) isDDLStmt()        {}
func (cs *CreateSequence) Pos() Position  { return cs.Position }
func (cs *CreateSequence) clearOffset()   { cs.Position.Offset = 0 }

// AlterSequence represents an ALTER SEQUENCE statement.
// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#alter-sequence
type AlterSequence struct {
	Name       ID
	Alteration SequenceAlteration

	Position Position
}

func (as *AlterSequence) String() string { return fmt.Sprintf("%#v", as) }
func (*AlterSequence) isDDLStmt()        {}
func (as *AlterSequence) Pos() Position  { return as.Position }
func (as *AlterSequence) clearOffset()   { as.Position.Offset = 0 }

// SequenceAlteration is satisfied by SetSequenceOptions.
type SequenceAlteration interface {
	isSequenceAlteration()
	SQL() string
}

// SetSequenceOptions is a SET OPTIONS sequence alteration.
type SetSequenceOptions struct{ Options SequenceOptions }

func (SetSequenceOptions) isSequenceAlteration() {}

// SequenceOptions represents options on a sequence as part of a CREATE SEQUENCE and ALTER SEQUENCE statement.
// Each field is nil when the corresponding option is not present.
type SequenceOptions struct {
	SequenceKind     *string
	SkipRangeMin     *int
	SkipRangeMax     *int
	StartWithCounter *int
}

// DropSequence represents a DROP SEQUENCE statement.
// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#drop-sequence
type DropSequence struct {
	Name     ID
	IfExists bool // whether "IF EXISTS" was specified

	Position Position
}

func (ds *DropSequence) String() string { return fmt.Sprintf("%#v", ds) }
func (*DropSequence) isDDLStmt()        {}
func (ds *DropSequence) Pos() Position  { return ds.Position }
func (ds *DropSequence) clearOffset()   { ds.Position.Offset = 0 }