aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/cloud.google.com/go/spanner
diff options
context:
space:
mode:
authorTaras Madan <tarasmadan@google.com>2025-01-22 16:07:17 +0100
committerTaras Madan <tarasmadan@google.com>2025-01-23 10:42:36 +0000
commit7b4377ad9d8a7205416df8d6217ef2b010f89481 (patch)
treee6fec4fd12ff807a16d847923f501075bf71d16c /vendor/cloud.google.com/go/spanner
parent475a4c203afb8b7d3af51c4fd32bb170ff32a45e (diff)
vendor: delete
Diffstat (limited to 'vendor/cloud.google.com/go/spanner')
-rw-r--r--vendor/cloud.google.com/go/spanner/CHANGES.md1062
-rw-r--r--vendor/cloud.google.com/go/spanner/LICENSE202
-rw-r--r--vendor/cloud.google.com/go/spanner/README.md116
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go636
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go61
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go120
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go4194
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go2446
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go1080
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go567
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go4807
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go128
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go39
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go49
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go23
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go664
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go125
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go39
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go3660
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go279
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go6928
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go61
-rw-r--r--vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go23
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/auxiliary.go69
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/doc.go123
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/info.go24
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go53
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go2445
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spanner_client_options.go25
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/commit_response.pb.go268
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/keys.pb.go476
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/mutation.pb.go497
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/query_plan.pb.go595
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/result_set.pb.go698
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/spanner.pb.go5303
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/transaction.pb.go1278
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/spannerpb/type.pb.go630
-rw-r--r--vendor/cloud.google.com/go/spanner/apiv1/version.go23
-rw-r--r--vendor/cloud.google.com/go/spanner/batch.go481
-rw-r--r--vendor/cloud.google.com/go/spanner/client.go1188
-rw-r--r--vendor/cloud.google.com/go/spanner/doc.go357
-rw-r--r--vendor/cloud.google.com/go/spanner/emulator_test.sh51
-rw-r--r--vendor/cloud.google.com/go/spanner/errors.go228
-rw-r--r--vendor/cloud.google.com/go/spanner/errors112.go34
-rw-r--r--vendor/cloud.google.com/go/spanner/errors113.go34
-rw-r--r--vendor/cloud.google.com/go/spanner/internal/version.go18
-rw-r--r--vendor/cloud.google.com/go/spanner/key.go435
-rw-r--r--vendor/cloud.google.com/go/spanner/mutation.go454
-rw-r--r--vendor/cloud.google.com/go/spanner/ot_metrics.go263
-rw-r--r--vendor/cloud.google.com/go/spanner/pdml.go147
-rw-r--r--vendor/cloud.google.com/go/spanner/protoutils.go165
-rw-r--r--vendor/cloud.google.com/go/spanner/read.go826
-rw-r--r--vendor/cloud.google.com/go/spanner/retry.go156
-rw-r--r--vendor/cloud.google.com/go/spanner/row.go575
-rw-r--r--vendor/cloud.google.com/go/spanner/session.go1971
-rw-r--r--vendor/cloud.google.com/go/spanner/sessionclient.go433
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/fuzz.go29
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/keywords.go322
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/parser.go4696
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/sql.go1183
-rw-r--r--vendor/cloud.google.com/go/spanner/spansql/types.go1394
-rw-r--r--vendor/cloud.google.com/go/spanner/statement.go83
-rw-r--r--vendor/cloud.google.com/go/spanner/stats.go377
-rw-r--r--vendor/cloud.google.com/go/spanner/timestampbound.go242
-rw-r--r--vendor/cloud.google.com/go/spanner/transaction.go1956
-rw-r--r--vendor/cloud.google.com/go/spanner/value.go4934
66 files changed, 0 insertions, 62848 deletions
diff --git a/vendor/cloud.google.com/go/spanner/CHANGES.md b/vendor/cloud.google.com/go/spanner/CHANGES.md
deleted file mode 100644
index 05453b9d0..000000000
--- a/vendor/cloud.google.com/go/spanner/CHANGES.md
+++ /dev/null
@@ -1,1062 +0,0 @@
-# Changes
-
-## [1.67.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.66.0...spanner/v1.67.0) (2024-08-15)
-
-
-### Features
-
-* **spanner/admin/database:** Add resource reference annotation to backup schedules ([#10677](https://github.com/googleapis/google-cloud-go/issues/10677)) ([6593c0d](https://github.com/googleapis/google-cloud-go/commit/6593c0d62d48751c857bce3d3f858127467a4489))
-* **spanner/admin/instance:** Add edition field to the instance proto ([6593c0d](https://github.com/googleapis/google-cloud-go/commit/6593c0d62d48751c857bce3d3f858127467a4489))
-* **spanner:** Support commit options in mutation operations. ([#10668](https://github.com/googleapis/google-cloud-go/issues/10668)) ([62a56f9](https://github.com/googleapis/google-cloud-go/commit/62a56f953d3b8fe82083c42926831c2728312b9c))
-
-
-### Bug Fixes
-
-* **spanner/test/opentelemetry/test:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
-* **spanner:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
-
-
-### Documentation
-
-* **spanner/admin/database:** Add an example to filter backups based on schedule name ([6593c0d](https://github.com/googleapis/google-cloud-go/commit/6593c0d62d48751c857bce3d3f858127467a4489))
-
-## [1.66.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.65.0...spanner/v1.66.0) (2024-08-07)
-
-
-### Features
-
-* **spanner/admin/database:** Add support for Cloud Spanner Incremental Backups ([d949cc0](https://github.com/googleapis/google-cloud-go/commit/d949cc0e5d44af62154d9d5fd393f25a852f93ed))
-* **spanner:** Add support of multiplexed session support in writeAtleastOnce mutations ([#10646](https://github.com/googleapis/google-cloud-go/issues/10646)) ([54009ea](https://github.com/googleapis/google-cloud-go/commit/54009eab1c3b11a28531ad9e621917d01c9e5339))
-* **spanner:** Add support of using multiplexed session with ReadOnlyTransactions ([#10269](https://github.com/googleapis/google-cloud-go/issues/10269)) ([7797022](https://github.com/googleapis/google-cloud-go/commit/7797022e51d1ac07b8d919c421a8bfdf34a1d53c))
-
-## [1.65.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.64.0...spanner/v1.65.0) (2024-07-29)
-
-
-### Features
-
-* **spanner/admin/database:** Add support for Cloud Spanner Scheduled Backups ([3b15f9d](https://github.com/googleapis/google-cloud-go/commit/3b15f9db9e0ee3bff3d8d5aafc82cdc2a31d60fc))
-* **spanner:** Add RESOURCE_EXHAUSTED to retryable transaction codes ([#10412](https://github.com/googleapis/google-cloud-go/issues/10412)) ([29b52dc](https://github.com/googleapis/google-cloud-go/commit/29b52dc40f3d1a6ffe7fa40e6142d8035c0d95ee))
-
-
-### Bug Fixes
-
-* **spanner/test:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
-* **spanner/test:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
-* **spanner/test:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
-* **spanner:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
-* **spanner:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
-* **spanner:** Fix negative values for max_in_use_sessions metrics [#10449](https://github.com/googleapis/google-cloud-go/issues/10449) ([#10508](https://github.com/googleapis/google-cloud-go/issues/10508)) ([4e180f4](https://github.com/googleapis/google-cloud-go/commit/4e180f4539012eb6e3d1d2788e68b291ef7230c3))
-* **spanner:** HealthCheck should not decrement num_in_use sessions ([#10480](https://github.com/googleapis/google-cloud-go/issues/10480)) ([9b2b47f](https://github.com/googleapis/google-cloud-go/commit/9b2b47f107153d624d56709d9a8e6a6b72c39447))
-* **spanner:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
-
-## [1.64.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.63.0...spanner/v1.64.0) (2024-06-29)
-
-
-### Features
-
-* **spanner:** Add field lock_hint in spanner.proto ([3df3c04](https://github.com/googleapis/google-cloud-go/commit/3df3c04f0dffad3fa2fe272eb7b2c263801b9ada))
-* **spanner:** Add field order_by in spanner.proto ([3df3c04](https://github.com/googleapis/google-cloud-go/commit/3df3c04f0dffad3fa2fe272eb7b2c263801b9ada))
-* **spanner:** Add LockHint feature ([#10382](https://github.com/googleapis/google-cloud-go/issues/10382)) ([64bdcb1](https://github.com/googleapis/google-cloud-go/commit/64bdcb1a6a462d41a62d3badea6814425e271f22))
-* **spanner:** Add OrderBy feature ([#10289](https://github.com/googleapis/google-cloud-go/issues/10289)) ([07b8bd2](https://github.com/googleapis/google-cloud-go/commit/07b8bd2f5dc738e0293305dfc459c13632d5ea65))
-* **spanner:** Add support of checking row not found errors from ReadRow and ReadRowUsingIndex ([#10405](https://github.com/googleapis/google-cloud-go/issues/10405)) ([5cb0c26](https://github.com/googleapis/google-cloud-go/commit/5cb0c26013eeb3bbe51174bee628a20c2ec775e0))
-
-
-### Bug Fixes
-
-* **spanner:** Fix data-race caused by TrackSessionHandle ([#10321](https://github.com/googleapis/google-cloud-go/issues/10321)) ([23c5fff](https://github.com/googleapis/google-cloud-go/commit/23c5fffd06bcde408db50a981c015921cd4ecf0e)), refs [#10320](https://github.com/googleapis/google-cloud-go/issues/10320)
-* **spanner:** Fix negative values for max_in_use_sessions metrics ([#10449](https://github.com/googleapis/google-cloud-go/issues/10449)) ([a1e198a](https://github.com/googleapis/google-cloud-go/commit/a1e198a9b18bd2f92c3438e4f609412047f8ccf4))
-* **spanner:** Prevent possible panic for Session not found errors ([#10386](https://github.com/googleapis/google-cloud-go/issues/10386)) ([ba9711f](https://github.com/googleapis/google-cloud-go/commit/ba9711f87ec871153ae00cfd0827bce17c31ee9c)), refs [#10385](https://github.com/googleapis/google-cloud-go/issues/10385)
-
-## [1.63.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.62.0...spanner/v1.63.0) (2024-05-24)
-
-
-### Features
-
-* **spanner:** Fix schema naming ([#10194](https://github.com/googleapis/google-cloud-go/issues/10194)) ([215e0c8](https://github.com/googleapis/google-cloud-go/commit/215e0c8125ea05246c834984bde1ca698c7dde4c))
-* **spanner:** Update go mod to use latest grpc lib ([#10218](https://github.com/googleapis/google-cloud-go/issues/10218)) ([adf91f9](https://github.com/googleapis/google-cloud-go/commit/adf91f9fd37faa39ec7c6f9200273220f65d2a82))
-
-## [1.62.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.61.0...spanner/v1.62.0) (2024-05-15)
-
-
-### Features
-
-* **spanner/admin/database:** Add support for multi region encryption config ([3e25053](https://github.com/googleapis/google-cloud-go/commit/3e250530567ee81ed4f51a3856c5940dbec35289))
-* **spanner/executor:** Add QueryCancellationAction message in executor protos ([292e812](https://github.com/googleapis/google-cloud-go/commit/292e81231b957ae7ac243b47b8926564cee35920))
-* **spanner:** Add `RESOURCE_EXHAUSTED` to the list of retryable error codes ([1d757c6](https://github.com/googleapis/google-cloud-go/commit/1d757c66478963d6cbbef13fee939632c742759c))
-* **spanner:** Add support for Proto Columns ([#9315](https://github.com/googleapis/google-cloud-go/issues/9315)) ([3ffbbbe](https://github.com/googleapis/google-cloud-go/commit/3ffbbbe50225684f4211c6dbe3ca25acb3d02b8e))
-
-
-### Bug Fixes
-
-* **spanner:** Add ARRAY keywords to keywords ([#10079](https://github.com/googleapis/google-cloud-go/issues/10079)) ([8e675cd](https://github.com/googleapis/google-cloud-go/commit/8e675cd0ccf12c6912209aa5c56092db3716c40d))
-* **spanner:** Handle unused errors ([#10067](https://github.com/googleapis/google-cloud-go/issues/10067)) ([a0c097c](https://github.com/googleapis/google-cloud-go/commit/a0c097c724b609cfa428e69f89075f02a3782a7b))
-* **spanner:** Remove json-iterator dependency ([#10099](https://github.com/googleapis/google-cloud-go/issues/10099)) ([3917cca](https://github.com/googleapis/google-cloud-go/commit/3917ccac57c403b3b4d07514ac10a66a86e298c0)), refs [#9380](https://github.com/googleapis/google-cloud-go/issues/9380)
-* **spanner:** Update staleness bound ([#10118](https://github.com/googleapis/google-cloud-go/issues/10118)) ([c07f1e4](https://github.com/googleapis/google-cloud-go/commit/c07f1e47c06387b696abb1edbfa339b391ec1fd5))
-
-## [1.61.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.60.0...spanner/v1.61.0) (2024-04-30)
-
-
-### Features
-
-* **spanner/admin/instance:** Adding `EXPECTED_FULFILLMENT_PERIOD` to indicate the instance creation times (with `FULFILLMENT_PERIOD_NORMAL` or `FULFILLMENT_PERIOD_EXTENDED` ENUM) with the extended instance creation time triggered by On-Demand Capacity... ([#9693](https://github.com/googleapis/google-cloud-go/issues/9693)) ([aa93790](https://github.com/googleapis/google-cloud-go/commit/aa93790132ba830b4c97d217ef02764e2fb1b8ea))
-* **spanner/executor:** Add SessionPoolOptions, SpannerOptions protos in executor protos ([2cdc40a](https://github.com/googleapis/google-cloud-go/commit/2cdc40a0b4288f5ab5f2b2b8f5c1d6453a9c81ec))
-* **spanner:** Add support for change streams transaction exclusion option ([#9779](https://github.com/googleapis/google-cloud-go/issues/9779)) ([979ce94](https://github.com/googleapis/google-cloud-go/commit/979ce94758442b1224a78a4f3b1f5d592ab51660))
-* **spanner:** Support MultiEndpoint ([#9565](https://github.com/googleapis/google-cloud-go/issues/9565)) ([0ac0d26](https://github.com/googleapis/google-cloud-go/commit/0ac0d265abedf946b05294ef874a892b2c5d6067))
-
-
-### Bug Fixes
-
-* **spanner/test/opentelemetry/test:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-* **spanner:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-* **spanner:** Fix uint8 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
-
-## [1.60.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.59.0...spanner/v1.60.0) (2024-03-19)
-
-
-### Features
-
-* **spanner:** Allow attempt direct path xds via env var ([e4b663c](https://github.com/googleapis/google-cloud-go/commit/e4b663cdcb6e010c5a8ac791e5624407aaa191b3))
-
-## [1.59.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.58.0...spanner/v1.59.0) (2024-03-13)
-
-
-### Features
-
-* **spanner/spansql:** Support Table rename & Table synonym ([#9275](https://github.com/googleapis/google-cloud-go/issues/9275)) ([9b97ce7](https://github.com/googleapis/google-cloud-go/commit/9b97ce75d36980fdaa06f15b0398b7b65e0d6082))
-* **spanner:** Add support of float32 type ([#9525](https://github.com/googleapis/google-cloud-go/issues/9525)) ([87d7ea9](https://github.com/googleapis/google-cloud-go/commit/87d7ea97787a56b18506b53e9b26d037f92759ca))
-
-
-### Bug Fixes
-
-* **spanner:** Add JSON_PARSE_ARRAY to funcNames slice ([#9557](https://github.com/googleapis/google-cloud-go/issues/9557)) ([f799597](https://github.com/googleapis/google-cloud-go/commit/f79959722352ead48bfb3efb3001fddd3a56db65))
-
-## [1.58.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.57.0...spanner/v1.58.0) (2024-03-06)
-
-
-### Features
-
-* **spanner/admin/instance:** Add instance partition support to spanner instance proto ([ae1f547](https://github.com/googleapis/google-cloud-go/commit/ae1f5472bff1b476c3fd58e590ec135185446daf))
-* **spanner:** Add field for multiplexed session in spanner.proto ([a86aa8e](https://github.com/googleapis/google-cloud-go/commit/a86aa8e962b77d152ee6cdd433ad94967150ef21))
-* **spanner:** SelectAll struct spanner tag annotation match should be case-insensitive ([#9460](https://github.com/googleapis/google-cloud-go/issues/9460)) ([6cd6a73](https://github.com/googleapis/google-cloud-go/commit/6cd6a73be87a261729d3b6b45f3d28be93c3fdb3))
-* **spanner:** Update TransactionOptions to include new option exclude_txn_from_change_streams ([0195fe9](https://github.com/googleapis/google-cloud-go/commit/0195fe9292274ff9d86c71079a8e96ed2e5f9331))
-
-## [1.57.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.56.0...spanner/v1.57.0) (2024-02-13)
-
-
-### Features
-
-* **spanner:** Add OpenTelemetry implementation ([#9254](https://github.com/googleapis/google-cloud-go/issues/9254)) ([fc51cc2](https://github.com/googleapis/google-cloud-go/commit/fc51cc2ac71e8fb0b3e381379dc343630ed441e7))
-* **spanner:** Support max_commit_delay in Spanner transactions ([#9299](https://github.com/googleapis/google-cloud-go/issues/9299)) ([a8078f0](https://github.com/googleapis/google-cloud-go/commit/a8078f0b841281bd439c548db9d303f6b5ce54e6))
-
-
-### Bug Fixes
-
-* **spanner:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698))
-* **spanner:** Internal test package should import local version ([#9416](https://github.com/googleapis/google-cloud-go/issues/9416)) ([f377281](https://github.com/googleapis/google-cloud-go/commit/f377281a73553af9a9a2bee2181efe2e354e1c68))
-* **spanner:** SelectAll struct fields match should be case-insensitive ([#9417](https://github.com/googleapis/google-cloud-go/issues/9417)) ([7ff5356](https://github.com/googleapis/google-cloud-go/commit/7ff535672b868e6cba54abdf5dd92b9199e4d1d4))
-* **spanner:** Support time.Time and other custom types using SelectAll ([#9382](https://github.com/googleapis/google-cloud-go/issues/9382)) ([dc21234](https://github.com/googleapis/google-cloud-go/commit/dc21234268b08a4a21b2b3a1ed9ed74d65a289f0))
-
-
-### Documentation
-
-* **spanner:** Update the comment regarding eligible SQL shapes for PartitionQuery ([e60a6ba](https://github.com/googleapis/google-cloud-go/commit/e60a6ba01acf2ef2e8d12e23ed5c6e876edeb1b7))
-
-## [1.56.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.55.0...spanner/v1.56.0) (2024-01-30)
-
-
-### Features
-
-* **spanner/admin/database:** Add proto descriptors for proto and enum types in create/update/get database ddl requests ([97d62c7](https://github.com/googleapis/google-cloud-go/commit/97d62c7a6a305c47670ea9c147edc444f4bf8620))
-* **spanner/spansql:** Add support for CREATE VIEW with SQL SECURITY DEFINER ([#8754](https://github.com/googleapis/google-cloud-go/issues/8754)) ([5f156e8](https://github.com/googleapis/google-cloud-go/commit/5f156e8c88f4729f569ee5b4ac9378dda3907997))
-* **spanner:** Add FLOAT32 enum to TypeCode ([97d62c7](https://github.com/googleapis/google-cloud-go/commit/97d62c7a6a305c47670ea9c147edc444f4bf8620))
-* **spanner:** Add max_commit_delay API ([af2f8b4](https://github.com/googleapis/google-cloud-go/commit/af2f8b4f3401c0b12dadb2c504aa0f902aee76de))
-* **spanner:** Add proto and enum types ([00b9900](https://github.com/googleapis/google-cloud-go/commit/00b990061592a20a181e61faa6964b45205b76a7))
-* **spanner:** Add SelectAll method to decode from Spanner iterator.Rows to golang struct ([#9206](https://github.com/googleapis/google-cloud-go/issues/9206)) ([802088f](https://github.com/googleapis/google-cloud-go/commit/802088f1322752bb9ce9bab1315c3fed6b3a99aa))
-
-## [1.55.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.54.0...spanner/v1.55.0) (2024-01-08)
-
-
-### Features
-
-* **spanner:** Add directed reads feature ([#7668](https://github.com/googleapis/google-cloud-go/issues/7668)) ([a42604a](https://github.com/googleapis/google-cloud-go/commit/a42604a3a6ea90c38a2ff90d036a79fd070174fd))
-
-## [1.54.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.53.1...spanner/v1.54.0) (2023-12-14)
-
-
-### Features
-
-* **spanner/executor:** Add autoscaling config in the instance to support autoscaling in systests ([29effe6](https://github.com/googleapis/google-cloud-go/commit/29effe600e16f24a127a1422ec04263c4f7a600a))
-* **spanner:** New clients ([#9127](https://github.com/googleapis/google-cloud-go/issues/9127)) ([2c97389](https://github.com/googleapis/google-cloud-go/commit/2c97389ddacdfc140a06f74498cc2753bb040a4d))
-
-
-### Bug Fixes
-
-* **spanner:** Use json.Number for decoding unknown values from spanner ([#9054](https://github.com/googleapis/google-cloud-go/issues/9054)) ([40d1392](https://github.com/googleapis/google-cloud-go/commit/40d139297bd484408c63c9d6ad1d7035d9673c1c))
-
-## [1.53.1](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.53.0...spanner/v1.53.1) (2023-12-01)
-
-
-### Bug Fixes
-
-* **spanner:** Handle nil error when cleaning up long running session ([#9052](https://github.com/googleapis/google-cloud-go/issues/9052)) ([a93bc26](https://github.com/googleapis/google-cloud-go/commit/a93bc2696bf9ae60aae93af0e8c4911b58514d31))
-* **spanner:** MarshalJSON function caused errors for certain values ([#9063](https://github.com/googleapis/google-cloud-go/issues/9063)) ([afe7c98](https://github.com/googleapis/google-cloud-go/commit/afe7c98036c198995075530d4228f1f4ae3f1222))
-
-## [1.53.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.52.0...spanner/v1.53.0) (2023-11-15)
-
-
-### Features
-
-* **spanner:** Enable long running transaction clean up ([#8969](https://github.com/googleapis/google-cloud-go/issues/8969)) ([5d181bb](https://github.com/googleapis/google-cloud-go/commit/5d181bb3a6fea55b8d9d596213516129006bdae2))
-
-## [1.52.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.51.0...spanner/v1.52.0) (2023-11-14)
-
-
-### Features
-
-* **spanner:** Add directed_read_option in spanner.proto ([#8950](https://github.com/googleapis/google-cloud-go/issues/8950)) ([24e410e](https://github.com/googleapis/google-cloud-go/commit/24e410efbb6add2d33ecfb6ad98b67dc8894e578))
-* **spanner:** Add DML, DQL, Mutation, Txn Actions and Utility methods for executor framework ([#8976](https://github.com/googleapis/google-cloud-go/issues/8976)) ([ca76671](https://github.com/googleapis/google-cloud-go/commit/ca7667194007394bdcade8058fa84c1fe19c06b1))
-* **spanner:** Add lastUseTime property to session ([#8942](https://github.com/googleapis/google-cloud-go/issues/8942)) ([b560cfc](https://github.com/googleapis/google-cloud-go/commit/b560cfcf967ff6dec0cd6ac4b13045470945f30b))
-* **spanner:** Add method ([#8945](https://github.com/googleapis/google-cloud-go/issues/8945)) ([411a51e](https://github.com/googleapis/google-cloud-go/commit/411a51e320fe21ffe830cdaa6bb4e4d77f7a996b))
-* **spanner:** Add methods to return Row fields ([#8953](https://github.com/googleapis/google-cloud-go/issues/8953)) ([e22e70f](https://github.com/googleapis/google-cloud-go/commit/e22e70f44f83aab4f8b89af28fcd24216d2e740e))
-* **spanner:** Add PG.OID type code annotation ([#8749](https://github.com/googleapis/google-cloud-go/issues/8749)) ([ffb0dda](https://github.com/googleapis/google-cloud-go/commit/ffb0ddabf3d9822ba8120cabaf25515fd32e9615))
-* **spanner:** Admin, Batch, Partition actions for executor framework ([#8932](https://github.com/googleapis/google-cloud-go/issues/8932)) ([b2db89e](https://github.com/googleapis/google-cloud-go/commit/b2db89e03a125cde31a7ea86eecc3fbb08ebd281))
-* **spanner:** Auto-generated executor framework proto changes ([#8713](https://github.com/googleapis/google-cloud-go/issues/8713)) ([2ca939c](https://github.com/googleapis/google-cloud-go/commit/2ca939cba4bc240f2bfca7d5683708fd3a94fd74))
-* **spanner:** BatchWrite ([#8652](https://github.com/googleapis/google-cloud-go/issues/8652)) ([507d232](https://github.com/googleapis/google-cloud-go/commit/507d232cdb09bd941ebfe800bdd4bfc020346f5d))
-* **spanner:** Executor framework server and worker proxy ([#8714](https://github.com/googleapis/google-cloud-go/issues/8714)) ([6b931ee](https://github.com/googleapis/google-cloud-go/commit/6b931eefb9aa4a18758788167bdcf9e2fad1d7b9))
-* **spanner:** Fix flakiness ([#8977](https://github.com/googleapis/google-cloud-go/issues/8977)) ([ca8d3cb](https://github.com/googleapis/google-cloud-go/commit/ca8d3cbf80f7fc2f47beb53b95138040c83097db))
-* **spanner:** Long running transaction clean up - disabled ([#8177](https://github.com/googleapis/google-cloud-go/issues/8177)) ([461d11e](https://github.com/googleapis/google-cloud-go/commit/461d11e913414e9de822e5f1acdf19c8f3f953d5))
-* **spanner:** Update code for session leaks cleanup ([#8978](https://github.com/googleapis/google-cloud-go/issues/8978)) ([cc83515](https://github.com/googleapis/google-cloud-go/commit/cc83515d0c837c8b1596a97b6f09d519a0f75f72))
-
-
-### Bug Fixes
-
-* **spanner:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880))
-* **spanner:** Expose Mutations field in MutationGroup ([#8923](https://github.com/googleapis/google-cloud-go/issues/8923)) ([42180cf](https://github.com/googleapis/google-cloud-go/commit/42180cf1134885188270f75126a65fa71b03c033))
-* **spanner:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
-* **spanner:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
-
-
-### Documentation
-
-* **spanner:** Updated comment formatting ([24e410e](https://github.com/googleapis/google-cloud-go/commit/24e410efbb6add2d33ecfb6ad98b67dc8894e578))
-
-## [1.51.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.50.0...spanner/v1.51.0) (2023-10-17)
-
-
-### Features
-
-* **spanner/admin/instance:** Add autoscaling config to the instance proto ([#8701](https://github.com/googleapis/google-cloud-go/issues/8701)) ([56ce871](https://github.com/googleapis/google-cloud-go/commit/56ce87195320634b07ae0b012efcc5f2b3813fb0))
-
-
-### Bug Fixes
-
-* **spanner:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-
-## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.49.0...spanner/v1.50.0) (2023-10-03)
-
-
-### Features
-
-* **spanner/spansql:** Add support for aggregate functions ([#8498](https://github.com/googleapis/google-cloud-go/issues/8498)) ([d440d75](https://github.com/googleapis/google-cloud-go/commit/d440d75f19286653afe4bc81a5f2efcfc4fa152c))
-* **spanner/spansql:** Add support for bit functions, sequence functions and GENERATE_UUID ([#8482](https://github.com/googleapis/google-cloud-go/issues/8482)) ([3789882](https://github.com/googleapis/google-cloud-go/commit/3789882c8b30a6d3100a56c1dcc8844952605637))
-* **spanner/spansql:** Add support for SEQUENCE statements ([#8481](https://github.com/googleapis/google-cloud-go/issues/8481)) ([ccd0205](https://github.com/googleapis/google-cloud-go/commit/ccd020598921f1b5550587c95b4ceddf580705bb))
-* **spanner:** Add BatchWrite API ([02a899c](https://github.com/googleapis/google-cloud-go/commit/02a899c95eb9660128506cf94525c5a75bedb308))
-* **spanner:** Allow non-default service accounts ([#8488](https://github.com/googleapis/google-cloud-go/issues/8488)) ([c90dd00](https://github.com/googleapis/google-cloud-go/commit/c90dd00350fa018dbc5f0af5aabce80e80be0b90))
-
-## [1.49.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.48.0...spanner/v1.49.0) (2023-08-24)
-
-
-### Features
-
-* **spanner/spannertest:** Support INSERT DML ([#7820](https://github.com/googleapis/google-cloud-go/issues/7820)) ([3dda7b2](https://github.com/googleapis/google-cloud-go/commit/3dda7b27ec536637d8ebaa20937fc8019c930481))
-
-
-### Bug Fixes
-
-* **spanner:** Transaction was started in a different session ([#8467](https://github.com/googleapis/google-cloud-go/issues/8467)) ([6c21558](https://github.com/googleapis/google-cloud-go/commit/6c21558f75628908a70de79c62aff2851e756e7b))
-
-## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.47.0...spanner/v1.48.0) (2023-08-18)
-
-
-### Features
-
-* **spanner/spansql:** Add complete set of math functions ([#8246](https://github.com/googleapis/google-cloud-go/issues/8246)) ([d7a238e](https://github.com/googleapis/google-cloud-go/commit/d7a238eca2a9b08e968cea57edc3708694673e22))
-* **spanner/spansql:** Add support for foreign key actions ([#8296](https://github.com/googleapis/google-cloud-go/issues/8296)) ([d78b851](https://github.com/googleapis/google-cloud-go/commit/d78b8513b13a9a2c04b8097f0d89f85dcfd73797))
-* **spanner/spansql:** Add support for IF NOT EXISTS and IF EXISTS clause ([#8245](https://github.com/googleapis/google-cloud-go/issues/8245)) ([96840ab](https://github.com/googleapis/google-cloud-go/commit/96840ab1232bbdb788e37f81cf113ee0f1b4e8e7))
-* **spanner:** Add integration tests for Bit Reversed Sequences ([#7924](https://github.com/googleapis/google-cloud-go/issues/7924)) ([9b6e7c6](https://github.com/googleapis/google-cloud-go/commit/9b6e7c6061dc69683d7f558faed7f4249da5b7cb))
-
-
-### Bug Fixes
-
-* **spanner:** Reset buffer after abort on first SQL statement ([#8440](https://github.com/googleapis/google-cloud-go/issues/8440)) ([d980b42](https://github.com/googleapis/google-cloud-go/commit/d980b42f33968ef25061be50e18038d73b0503b6))
-* **spanner:** REST query UpdateMask bug ([df52820](https://github.com/googleapis/google-cloud-go/commit/df52820b0e7721954809a8aa8700b93c5662dc9b))
-
-## [1.47.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.46.0...spanner/v1.47.0) (2023-06-20)
-
-
-### Features
-
-* **spanner/admin/database:** Add DdlStatementActionInfo and add actions to UpdateDatabaseDdlMetadata ([01eff11](https://github.com/googleapis/google-cloud-go/commit/01eff11eedb3edde69cc33db23e26be6a7e42f10))
-* **spanner:** Add databoost property for batch transactions ([#8152](https://github.com/googleapis/google-cloud-go/issues/8152)) ([fc49c78](https://github.com/googleapis/google-cloud-go/commit/fc49c78c9503c6dd4cbcba8c15e887415a744136))
-* **spanner:** Add tests for database roles in PG dialect ([#7898](https://github.com/googleapis/google-cloud-go/issues/7898)) ([dc84649](https://github.com/googleapis/google-cloud-go/commit/dc84649c546fe09b0bab09991086c156bd78cb3f))
-* **spanner:** Enable client to server compression ([#7899](https://github.com/googleapis/google-cloud-go/issues/7899)) ([3a047d2](https://github.com/googleapis/google-cloud-go/commit/3a047d2a449b0316a9000539ec9797e47cdd5c91))
-* **spanner:** Update all direct dependencies ([b340d03](https://github.com/googleapis/google-cloud-go/commit/b340d030f2b52a4ce48846ce63984b28583abde6))
-
-
-### Bug Fixes
-
-* **spanner:** Fix TestRetryInfoTransactionOutcomeUnknownError flaky behaviour ([#7959](https://github.com/googleapis/google-cloud-go/issues/7959)) ([f037795](https://github.com/googleapis/google-cloud-go/commit/f03779538f949fb4ad93d5247d3c6b3e5b21091a))
-
-## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.45.1...spanner/v1.46.0) (2023-05-12)
-
-
-### Features
-
-* **spanner/admin/database:** Add support for UpdateDatabase in Cloud Spanner ([#7917](https://github.com/googleapis/google-cloud-go/issues/7917)) ([83870f5](https://github.com/googleapis/google-cloud-go/commit/83870f55035d6692e22264b209e39e07fe2823b9))
-* **spanner:** Make leader aware routing default enabled for supported RPC requests. ([#7912](https://github.com/googleapis/google-cloud-go/issues/7912)) ([d0d3755](https://github.com/googleapis/google-cloud-go/commit/d0d37550911f37e09ea9204d0648fb64ff3204ff))
-
-
-### Bug Fixes
-
-* **spanner:** Update grpc to v1.55.0 ([1147ce0](https://github.com/googleapis/google-cloud-go/commit/1147ce02a990276ca4f8ab7a1ab65c14da4450ef))
-
-## [1.45.1](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.45.0...spanner/v1.45.1) (2023-04-21)
-
-
-### Bug Fixes
-
-* **spanner/spannertest:** Send transaction id in result metadata ([#7809](https://github.com/googleapis/google-cloud-go/issues/7809)) ([e3bbd5f](https://github.com/googleapis/google-cloud-go/commit/e3bbd5f10b3922ab2eb50cb39daccd7bc1891892))
-* **spanner:** Context timeout should be wrapped correctly ([#7744](https://github.com/googleapis/google-cloud-go/issues/7744)) ([f8e22f6](https://github.com/googleapis/google-cloud-go/commit/f8e22f6cbba10fc262e87b4d06d5c1289d877503))
-
-## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.44.0...spanner/v1.45.0) (2023-04-10)
-
-
-### Features
-
-* **spanner/spansql:** Add support for missing DDL syntax for ALTER CHANGE STREAM ([#7429](https://github.com/googleapis/google-cloud-go/issues/7429)) ([d34fe02](https://github.com/googleapis/google-cloud-go/commit/d34fe02cfa31520f88dedbd41bbc887e8faa857f))
-* **spanner/spansql:** Support fine-grained access control DDL syntax ([#6691](https://github.com/googleapis/google-cloud-go/issues/6691)) ([a7edf6b](https://github.com/googleapis/google-cloud-go/commit/a7edf6b5c62d02b7d5199fc83d435f6a37a8eac5))
-* **spanner/spansql:** Support grant/revoke view, change stream, table function ([#7533](https://github.com/googleapis/google-cloud-go/issues/7533)) ([9c61215](https://github.com/googleapis/google-cloud-go/commit/9c612159647d540e694ec9e84cab5cdd1c94d2b8))
-* **spanner:** Add x-goog-spanner-route-to-leader header to Spanner RPC contexts for RW/PDML transactions. ([#7500](https://github.com/googleapis/google-cloud-go/issues/7500)) ([fcab05f](https://github.com/googleapis/google-cloud-go/commit/fcab05faa5026896af76b762eed5b7b6b2e7ee07))
-* **spanner:** Adding new fields for Serverless analytics ([69067f8](https://github.com/googleapis/google-cloud-go/commit/69067f8c0075099a84dd9d40e438711881710784))
-* **spanner:** Enable custom decoding for list value ([#7463](https://github.com/googleapis/google-cloud-go/issues/7463)) ([3aeadcd](https://github.com/googleapis/google-cloud-go/commit/3aeadcd97eaf2707c2f6e288c8b72ef29f49a185))
-* **spanner:** Update iam and longrunning deps ([91a1f78](https://github.com/googleapis/google-cloud-go/commit/91a1f784a109da70f63b96414bba8a9b4254cddd))
-
-
-### Bug Fixes
-
-* **spanner/spansql:** Fix SQL for CREATE CHANGE STREAM TableName; case ([#7514](https://github.com/googleapis/google-cloud-go/issues/7514)) ([fc5fd86](https://github.com/googleapis/google-cloud-go/commit/fc5fd8652771aeca73e7a28ee68134155a5a9499))
-* **spanner:** Correcting the proto field Id for field data_boost_enabled ([00fff3a](https://github.com/googleapis/google-cloud-go/commit/00fff3a58bed31274ab39af575876dab91d708c9))
-
-## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.43.0...spanner/v1.44.0) (2023-02-01)
-
-
-### Features
-
-* **spanner/spansql:** Add support for ALTER INDEX statement ([#7287](https://github.com/googleapis/google-cloud-go/issues/7287)) ([fbe1bd4](https://github.com/googleapis/google-cloud-go/commit/fbe1bd4d0806302a48ff4a5822867757893a5f2d))
-* **spanner/spansql:** Add support for managing the optimizer statistics package ([#7283](https://github.com/googleapis/google-cloud-go/issues/7283)) ([e528221](https://github.com/googleapis/google-cloud-go/commit/e52822139e2821a11873c2d6af85a5fea07700e8))
-* **spanner:** Add support for Optimistic Concurrency Control ([#7332](https://github.com/googleapis/google-cloud-go/issues/7332)) ([48ba16f](https://github.com/googleapis/google-cloud-go/commit/48ba16f3a09893a3527a22838ad1e9ff829da15b))
-
-## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.42.0...spanner/v1.43.0) (2023-01-19)
-
-
-### Features
-
-* **spanner/spansql:** Add support for change stream value_capture_type option ([#7201](https://github.com/googleapis/google-cloud-go/issues/7201)) ([27b3398](https://github.com/googleapis/google-cloud-go/commit/27b33988f078779c2d641f776a11b2095a5ccc51))
-* **spanner/spansql:** Support `default_leader` database option ([#7187](https://github.com/googleapis/google-cloud-go/issues/7187)) ([88adaa2](https://github.com/googleapis/google-cloud-go/commit/88adaa216832467560c19e61528b5ce5f1e5ff76))
-* **spanner:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0))
-* **spanner:** Inline begin transaction for ReadWriteTransactions ([#7149](https://github.com/googleapis/google-cloud-go/issues/7149)) ([2ce3606](https://github.com/googleapis/google-cloud-go/commit/2ce360644439a386aeaad7df5f47541667bd621b))
-
-
-### Bug Fixes
-
-* **spanner:** Fix integration tests data race ([#7229](https://github.com/googleapis/google-cloud-go/issues/7229)) ([a741024](https://github.com/googleapis/google-cloud-go/commit/a741024abd6fb1f073831503c2717b2a44226a59))
-
-## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.41.0...spanner/v1.42.0) (2022-12-14)
-
-
-### Features
-
-* **spanner:** Add database roles ([#5701](https://github.com/googleapis/google-cloud-go/issues/5701)) ([6bb95ef](https://github.com/googleapis/google-cloud-go/commit/6bb95efb7997692a52c321e787e633a5045b21f8))
-* **spanner:** Rewrite signatures and type in terms of new location ([620e6d8](https://github.com/googleapis/google-cloud-go/commit/620e6d828ad8641663ae351bfccfe46281e817ad))
-
-
-### Bug Fixes
-
-* **spanner:** Fallback to check grpc error message if ResourceType is nil for checking sessionNotFound errors ([#7163](https://github.com/googleapis/google-cloud-go/issues/7163)) ([2552e09](https://github.com/googleapis/google-cloud-go/commit/2552e092cff01e0d6b80fefaa7877f77e36db6be))
-
-## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.40.0...spanner/v1.41.0) (2022-12-01)
-
-
-### Features
-
-* **spanner:** Start generating proto stubs ([#7030](https://github.com/googleapis/google-cloud-go/issues/7030)) ([41f446f](https://github.com/googleapis/google-cloud-go/commit/41f446f891a17c97278879f2207fd58996fd038c))
-
-## [1.40.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.39.0...spanner/v1.40.0) (2022-11-03)
-
-
-### Features
-
-* **spanner/spansql:** Add support for interval arg of some date/timestamp functions ([#6950](https://github.com/googleapis/google-cloud-go/issues/6950)) ([1ce0f7d](https://github.com/googleapis/google-cloud-go/commit/1ce0f7d38778068fd1d9a171377067739f4ea8d6))
-* **spanner:** Configurable logger ([#6958](https://github.com/googleapis/google-cloud-go/issues/6958)) ([bd85442](https://github.com/googleapis/google-cloud-go/commit/bd85442bc6fb8c18d1a7c6d73850d220c3973c46)), refs [#6957](https://github.com/googleapis/google-cloud-go/issues/6957)
-* **spanner:** PG JSONB support ([#6874](https://github.com/googleapis/google-cloud-go/issues/6874)) ([5b14658](https://github.com/googleapis/google-cloud-go/commit/5b146587939ccc3403945c756cbf68e6f2d41fda))
-* **spanner:** Update result_set.proto to return undeclared parameters in ExecuteSql API ([de4e16a](https://github.com/googleapis/google-cloud-go/commit/de4e16a498354ea7271f5b396f7cb2bb430052aa))
-* **spanner:** Update transaction.proto to include different lock modes ([caf4afa](https://github.com/googleapis/google-cloud-go/commit/caf4afa139ad7b38b6df3e3b17b8357c81e1fd6c))
-
-## [1.39.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.38.0...spanner/v1.39.0) (2022-09-21)
-
-
-### Features
-
-* **spanner/admin/database:** Add custom instance config operations ([ec1a190](https://github.com/googleapis/google-cloud-go/commit/ec1a190abbc4436fcaeaa1421c7d9df624042752))
-* **spanner/admin/instance:** Add custom instance config operations ([ef2b0b1](https://github.com/googleapis/google-cloud-go/commit/ef2b0b1d4de9beb9005537ae48d7d8e1c0f23b98))
-* **spanner/spannersql:** Add backticks when name contains a hyphen ([#6621](https://github.com/googleapis/google-cloud-go/issues/6621)) ([e88ca66](https://github.com/googleapis/google-cloud-go/commit/e88ca66ca950e15d9011322dbfca3c88ccceb0ec))
-* **spanner/spansql:** Add support for create, alter and drop change … ([#6669](https://github.com/googleapis/google-cloud-go/issues/6669)) ([cc4620a](https://github.com/googleapis/google-cloud-go/commit/cc4620a5ee3a9129a4cdd48d90d4060ba0bbcd58))
-* **spanner:** Retry spanner transactions and mutations when RST_STREAM error ([#6699](https://github.com/googleapis/google-cloud-go/issues/6699)) ([1b56cd0](https://github.com/googleapis/google-cloud-go/commit/1b56cd0ec31bc32362259fc722907e092bae081a))
-
-
-### Bug Fixes
-
-* **spanner/admin/database:** Revert add custom instance config operations (change broke client libraries; reverting before any are released) ([ec1a190](https://github.com/googleapis/google-cloud-go/commit/ec1a190abbc4436fcaeaa1421c7d9df624042752))
-* **spanner:** Destroy session when client is closing ([#6700](https://github.com/googleapis/google-cloud-go/issues/6700)) ([a1ce541](https://github.com/googleapis/google-cloud-go/commit/a1ce5410f1e0f4d68dae0ddc790518e9978faf0c))
-* **spanner:** Spanner sessions will be cleaned up from the backend ([#6679](https://github.com/googleapis/google-cloud-go/issues/6679)) ([c27097e](https://github.com/googleapis/google-cloud-go/commit/c27097e236abeb8439a67ad9b716d05c001aea2e))
-
-## [1.38.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.37.0...spanner/v1.38.0) (2022-09-03)
-
-
-### Features
-
-* **spanner/spannertest:** add support for adding and dropping Foreign Keys ([#6608](https://github.com/googleapis/google-cloud-go/issues/6608)) ([ccd3614](https://github.com/googleapis/google-cloud-go/commit/ccd3614f6edbaf3d7d202feb4df220f244550a78))
-* **spanner/spansql:** add support for coalesce expressions ([#6461](https://github.com/googleapis/google-cloud-go/issues/6461)) ([bff16a7](https://github.com/googleapis/google-cloud-go/commit/bff16a783c1fd4d7e888d4ee3b5420c1bbf10da1))
-* **spanner:** Adds auto-generated CL for googleapis for jsonb ([3bc37e2](https://github.com/googleapis/google-cloud-go/commit/3bc37e28626df5f7ec37b00c0c2f0bfb91c30495))
-
-
-### Bug Fixes
-
-* **spanner:** pass userAgent to cloud spanner requests ([#6598](https://github.com/googleapis/google-cloud-go/issues/6598)) ([59d162b](https://github.com/googleapis/google-cloud-go/commit/59d162bdfcbe00a060a52930be7185f00e8df2c1))
-
-## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.36.0...spanner/v1.37.0) (2022-08-28)
-
-
-### Features
-
-* **spanner/admin/database:** Add ListDatabaseRoles API to support role based access control ([1ffeb95](https://github.com/googleapis/google-cloud-go/commit/1ffeb9557bf1f18cc131aff40ec7e0e15a9f4ead))
-* **spanner/spansql:** add support for nullif expressions ([#6423](https://github.com/googleapis/google-cloud-go/issues/6423)) ([5b7bfeb](https://github.com/googleapis/google-cloud-go/commit/5b7bfebcd4a0fd3cbe355d9d290e6b5101810b7e))
-* **spanner:** install grpc rls and xds by default ([#6007](https://github.com/googleapis/google-cloud-go/issues/6007)) ([70d562f](https://github.com/googleapis/google-cloud-go/commit/70d562f25738052e833a46daf6ff7fa1f4a0a746))
-* **spanner:** set client wide ReadOptions, ApplyOptions, and TransactionOptions ([#6486](https://github.com/googleapis/google-cloud-go/issues/6486)) ([757f1ca](https://github.com/googleapis/google-cloud-go/commit/757f1cac7a765fe2e7ead872d07eb24baad61c28))
-
-
-### Bug Fixes
-
-* **spanner/admin/database:** target new spanner db admin service config ([1d6fbcc](https://github.com/googleapis/google-cloud-go/commit/1d6fbcc6406e2063201ef5a98de560bf32f7fb73))
-
-## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.35.0...spanner/v1.36.0) (2022-07-23)
-
-
-### Features
-
-* **spanner/spansql:** add support for IFNULL expressions ([#6389](https://github.com/googleapis/google-cloud-go/issues/6389)) ([09e96ce](https://github.com/googleapis/google-cloud-go/commit/09e96ce1076df4b41d45c3676b7506b318da6b9c))
-* **spanner/spansql:** support for parsing a DML file ([#6349](https://github.com/googleapis/google-cloud-go/issues/6349)) ([267a9bb](https://github.com/googleapis/google-cloud-go/commit/267a9bbec55ee8fe885354efc8db8a61a17a8374))
-
-## [1.35.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.34.1...spanner/v1.35.0) (2022-07-19)
-
-
-### Features
-
-* **spanner/admin/instance:** Adding two new fields for Instance create_time and update_time ([8a1ad06](https://github.com/googleapis/google-cloud-go/commit/8a1ad06572a65afa91a0a77a85b849e766876671))
-* **spanner/spansql:** add support for if expressions ([#6341](https://github.com/googleapis/google-cloud-go/issues/6341)) ([56c858c](https://github.com/googleapis/google-cloud-go/commit/56c858cebd683e45d1dd5ab8ae98ef9bfd767edc))
-
-
-### Bug Fixes
-
-* **spanner:** fix pool.numInUse exceeding MaxOpened ([#6344](https://github.com/googleapis/google-cloud-go/issues/6344)) ([882b325](https://github.com/googleapis/google-cloud-go/commit/882b32593e8c7bff8369b1ff9259c7b408fad661))
-
-## [1.34.1](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.34.0...spanner/v1.34.1) (2022-07-06)
-
-
-### Bug Fixes
-
-* **spanner/spansql:** Add tests for INSERT parsing ([#6303](https://github.com/googleapis/google-cloud-go/issues/6303)) ([0d19fb5](https://github.com/googleapis/google-cloud-go/commit/0d19fb5d60554b9a90fac52918f784e6c3e13918))
-
-## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.33.0...spanner/v1.34.0) (2022-06-17)
-
-
-### Features
-
-* **spanner/spansql:** add a support for parsing INSERT statement ([#6148](https://github.com/googleapis/google-cloud-go/issues/6148)) ([c6185cf](https://github.com/googleapis/google-cloud-go/commit/c6185cffc7f23741ac4a230aadee74b3def85ced))
-* **spanner:** add Session creator role docs: clarify transaction semantics ([4134941](https://github.com/googleapis/google-cloud-go/commit/41349411e601f57dc6d9e246f1748fd86d17bb15))
-
-## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.32.0...spanner/v1.33.0) (2022-05-28)
-
-
-### Bug Fixes
-
-* **spanner/spansql:** fix invalid timestamp literal formats ([#6077](https://github.com/googleapis/google-cloud-go/issues/6077)) ([6ab8bed](https://github.com/googleapis/google-cloud-go/commit/6ab8bed93a978e00a6c195d8cb4d574ca6db27c3))
-
-
-### Miscellaneous Chores
-
-* **spanner:** release 1.33.0 ([#6104](https://github.com/googleapis/google-cloud-go/issues/6104)) ([54bc54e](https://github.com/googleapis/google-cloud-go/commit/54bc54e9bbdc22e2bbfd9f315885f95987e2c3f2))
-
-## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.31.0...spanner/v1.32.0) (2022-05-09)
-
-
-### Features
-
-* **spanner/spansql:** support DEFAULT keyword ([#5932](https://github.com/googleapis/google-cloud-go/issues/5932)) ([49c19a9](https://github.com/googleapis/google-cloud-go/commit/49c19a956031fa889d024bd57fa34681bc79e743))
-* **spanner/spansql:** support JSON literals ([#5968](https://github.com/googleapis/google-cloud-go/issues/5968)) ([b500120](https://github.com/googleapis/google-cloud-go/commit/b500120f3cc5c7b5717f6525a24de72fd317ba66))
-* **spanner:** enable row.ToStructLenient to work with STRUCT data type ([#5944](https://github.com/googleapis/google-cloud-go/issues/5944)) ([bca8d50](https://github.com/googleapis/google-cloud-go/commit/bca8d50533115b9995f7b4a63d5d1f9abaf6a753))
-
-## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.30.1...spanner/v1.31.0) (2022-04-08)
-
-
-### Features
-
-* **spanner/spansql:** support case expression ([#5836](https://github.com/googleapis/google-cloud-go/issues/5836)) ([3ffdd62](https://github.com/googleapis/google-cloud-go/commit/3ffdd626e72c6472f337a423b9702baf0c298185))
-
-
-### Bug Fixes
-
-* **spanner/spannertest:** Improve DDL application delay cancellation. ([#5874](https://github.com/googleapis/google-cloud-go/issues/5874)) ([08f1e72](https://github.com/googleapis/google-cloud-go/commit/08f1e72dbf2ef5a06425f71500d061af246bd490))
-
-### [1.30.1](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.30.0...spanner/v1.30.1) (2022-03-28)
-
-
-### Bug Fixes
-
-* **spanner:** early unlock of session pool lock while dumping the tracked session handles to avoid deadlock ([#5777](https://github.com/googleapis/google-cloud-go/issues/5777)) ([b007836](https://github.com/googleapis/google-cloud-go/commit/b0078362865159b87bc34c1a7f990a361f1cafcf))
-
-## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.29.0...spanner/v1.30.0) (2022-03-04)
-
-
-### Features
-
-* **spanner:** add better version metadata to calls ([#5515](https://github.com/googleapis/google-cloud-go/issues/5515)) ([dcab7c4](https://github.com/googleapis/google-cloud-go/commit/dcab7c4a98ebecfef1f75ec5bddfd7782b28a7c5)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749)
-* **spanner:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e))
-* **spanner:** add support of PGNumeric with integration tests for PG dialect ([#5700](https://github.com/googleapis/google-cloud-go/issues/5700)) ([f7e02e1](https://github.com/googleapis/google-cloud-go/commit/f7e02e11064d14c04eca18ab808e8fe5194ac355))
-* **spanner:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
-
-### Bug Fixes
-
-* **spanner/spansql:** support GROUP BY without an aggregation function ([#5717](https://github.com/googleapis/google-cloud-go/issues/5717)) ([c819ee9](https://github.com/googleapis/google-cloud-go/commit/c819ee9ad4695afa31eddcb4bf87764762555cd5))
-
-
-### Miscellaneous Chores
-
-* **spanner:** release 1.30.0 ([#5715](https://github.com/googleapis/google-cloud-go/issues/5715)) ([a19d182](https://github.com/googleapis/google-cloud-go/commit/a19d182dab5476cf01e719c751e94a73a98c6c4a))
-
-## [1.29.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.28.0...spanner/v1.29.0) (2022-01-06)
-
-
-### ⚠ BREAKING CHANGES
-
-* **spanner:** fix data race in spanner integration tests (#5276)
-
-### Features
-
-* **spanner/spansql:** support EXTRACT ([#5218](https://www.github.com/googleapis/google-cloud-go/issues/5218)) ([81b7c85](https://www.github.com/googleapis/google-cloud-go/commit/81b7c85a8993a36557ea4eb4ec0c47d1f93c4960))
-* **spanner/spansql:** support MOD function ([#5231](https://www.github.com/googleapis/google-cloud-go/issues/5231)) ([0a81fbc](https://www.github.com/googleapis/google-cloud-go/commit/0a81fbc0171af7e828f3e606cbe7b3905ac32213))
-* **spanner:** add google-c2p dependence ([5343756](https://www.github.com/googleapis/google-cloud-go/commit/534375668b5b81bae5ef750c96856bef027f9d1e))
-* **spanner:** Add ReadRowWithOptions method ([#5240](https://www.github.com/googleapis/google-cloud-go/issues/5240)) ([c276428](https://www.github.com/googleapis/google-cloud-go/commit/c276428bca79702245d422849af6472bb2e74171))
-* **spanner:** Adding GFE Latency and Header Missing Count Metrics ([#5199](https://www.github.com/googleapis/google-cloud-go/issues/5199)) ([3d8a9ea](https://www.github.com/googleapis/google-cloud-go/commit/3d8a9ead8d73a4f38524a424a98362c32f56954b))
-
-
-### Bug Fixes
-
-* **spanner:** result from unmarshal of string and spanner.NullString type from json should be consistent. ([#5263](https://www.github.com/googleapis/google-cloud-go/issues/5263)) ([7eaaa47](https://www.github.com/googleapis/google-cloud-go/commit/7eaaa470fda5dc7cd1ff041d6a898e35fb54920e))
-
-
-### Tests
-
-* **spanner:** fix data race in spanner integration tests ([#5276](https://www.github.com/googleapis/google-cloud-go/issues/5276)) ([22df34b](https://www.github.com/googleapis/google-cloud-go/commit/22df34b8e7d0d003b3eeaf1c069aee58f30a8dfe))
-
-
-### Miscellaneous Chores
-
-* **spanner:** release 1.29.0 ([#5292](https://www.github.com/googleapis/google-cloud-go/issues/5292)) ([9f0b900](https://www.github.com/googleapis/google-cloud-go/commit/9f0b9003686d26c66a10c3b54e67b59c2a6327ff))
-
-## [1.28.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.27.0...spanner/v1.28.0) (2021-12-03)
-
-
-### Features
-
-* **spanner/spannertest:** support JSON_VALUE function ([#5173](https://www.github.com/googleapis/google-cloud-go/issues/5173)) ([ac98735](https://www.github.com/googleapis/google-cloud-go/commit/ac98735cb1adc9384c5b2caeb9aac938db275bf7))
-* **spanner/spansql:** support CAST and SAFE_CAST ([#5057](https://www.github.com/googleapis/google-cloud-go/issues/5057)) ([54cbf4c](https://www.github.com/googleapis/google-cloud-go/commit/54cbf4c0a0305e680b213f84487110dfeaf8e7e1))
-* **spanner:** add ToStructLenient method to decode to struct fields with no error return with un-matched row's column with struct's exported fields. ([#5153](https://www.github.com/googleapis/google-cloud-go/issues/5153)) ([899ffbf](https://www.github.com/googleapis/google-cloud-go/commit/899ffbf8ce42b1597ca3cd59bfd9f042054b8ae2))
-
-## [1.27.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.26.0...spanner/v1.27.0) (2021-10-19)
-
-
-### Features
-
-* **spanner:** implement valuer and scanner interfaces ([#4936](https://www.github.com/googleapis/google-cloud-go/issues/4936)) ([4537b45](https://www.github.com/googleapis/google-cloud-go/commit/4537b45d2611ce480abfb5d186b59e7258ec872c))
-
-## [1.26.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.25.0...spanner/v1.26.0) (2021-10-11)
-
-
-### Features
-
-* **spanner/spannertest:** implement RowDeletionPolicy in spannertest ([#4961](https://www.github.com/googleapis/google-cloud-go/issues/4961)) ([7800a33](https://www.github.com/googleapis/google-cloud-go/commit/7800a3303b97204a0573780786388437bbbf2673)), refs [#4782](https://www.github.com/googleapis/google-cloud-go/issues/4782)
-* **spanner/spannertest:** Support generated columns ([#4742](https://www.github.com/googleapis/google-cloud-go/issues/4742)) ([324d11d](https://www.github.com/googleapis/google-cloud-go/commit/324d11d3c19ffbd77848c8e19c972b70ff5e9268))
-* **spanner/spansql:** fill in missing hash functions ([#4808](https://www.github.com/googleapis/google-cloud-go/issues/4808)) ([37ee2d9](https://www.github.com/googleapis/google-cloud-go/commit/37ee2d95220efc1aaf0280d0aa2c01ae4b9d4c1b))
-* **spanner/spansql:** support JSON data type ([#4959](https://www.github.com/googleapis/google-cloud-go/issues/4959)) ([e84e408](https://www.github.com/googleapis/google-cloud-go/commit/e84e40830752fc8bc0ccdd869fa7b8fd0c80f306))
-* **spanner/spansql:** Support multiple joins in query ([#4743](https://www.github.com/googleapis/google-cloud-go/issues/4743)) ([81a308e](https://www.github.com/googleapis/google-cloud-go/commit/81a308e909a3ae97504a49fbc9982f7eeb6be80c))
-
-## [1.25.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.24.1...spanner/v1.25.0) (2021-08-25)
-
-
-### Features
-
-* **spanner/spansql:** add support for STARTS_WITH function ([#4670](https://www.github.com/googleapis/google-cloud-go/issues/4670)) ([7a56af0](https://www.github.com/googleapis/google-cloud-go/commit/7a56af03d1505d9a29d1185a50e261c0e90fdb1a)), refs [#4661](https://www.github.com/googleapis/google-cloud-go/issues/4661)
-* **spanner:** add support for JSON data type ([#4104](https://www.github.com/googleapis/google-cloud-go/issues/4104)) ([ade8ab1](https://www.github.com/googleapis/google-cloud-go/commit/ade8ab111315d84fa140ddde020387a78668dfa4))
-
-
-### Bug Fixes
-
-* **spanner/spannertest:** Fix the "LIKE" clause handling for prefix and suffix matches ([#4655](https://www.github.com/googleapis/google-cloud-go/issues/4655)) ([a2118f0](https://www.github.com/googleapis/google-cloud-go/commit/a2118f02fb03bfc50952699318f35c23dc234c41))
-* **spanner:** invalid numeric should throw an error ([#3926](https://www.github.com/googleapis/google-cloud-go/issues/3926)) ([cde8697](https://www.github.com/googleapis/google-cloud-go/commit/cde8697be01f1ef57806275c0ddf54f87bb9a571))
-
-### [1.24.1](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.24.0...spanner/v1.24.1) (2021-08-11)
-
-
-### Bug Fixes
-
-* **spanner/spansql:** only add comma after other option ([#4551](https://www.github.com/googleapis/google-cloud-go/issues/4551)) ([3ac1e00](https://www.github.com/googleapis/google-cloud-go/commit/3ac1e007163803d315dcf5db612fe003f6eab978))
-* **spanner:** allow decoding null values to spanner.Decoder ([#4558](https://www.github.com/googleapis/google-cloud-go/issues/4558)) ([45ddaca](https://www.github.com/googleapis/google-cloud-go/commit/45ddaca606a372d9293bf2e2b3dc6d4398166c43)), refs [#4552](https://www.github.com/googleapis/google-cloud-go/issues/4552)
-
-## [1.24.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.23.0...spanner/v1.24.0) (2021-07-29)
-
-
-### Features
-
-* **spanner/spansql:** add ROW DELETION POLICY parsing ([#4496](https://www.github.com/googleapis/google-cloud-go/issues/4496)) ([3d6c6c7](https://www.github.com/googleapis/google-cloud-go/commit/3d6c6c7873e1b75e8b492ede2e561411dc40536a))
-* **spanner/spansql:** fix unstable SelectFromTable SQL ([#4473](https://www.github.com/googleapis/google-cloud-go/issues/4473)) ([39bc4ec](https://www.github.com/googleapis/google-cloud-go/commit/39bc4eca655d0180b18378c175d4a9a77fe1602f))
-* **spanner/spansql:** support ALTER DATABASE ([#4403](https://www.github.com/googleapis/google-cloud-go/issues/4403)) ([1458dc9](https://www.github.com/googleapis/google-cloud-go/commit/1458dc9c21d98ffffb871943f178678cc3c21306))
-* **spanner/spansql:** support table_hint_expr at from_clause on query_statement ([#4457](https://www.github.com/googleapis/google-cloud-go/issues/4457)) ([7047808](https://www.github.com/googleapis/google-cloud-go/commit/7047808794cf463c6a96d7b59ef5af3ed94fd7cf))
-* **spanner:** add row.String() and refine error message for decoding a struct array ([#4431](https://www.github.com/googleapis/google-cloud-go/issues/4431)) ([f6258a4](https://www.github.com/googleapis/google-cloud-go/commit/f6258a47a4dfadc02dcdd75b53fd5f88c5dcca30))
-* **spanner:** allow untyped nil values in parameterized queries ([#4482](https://www.github.com/googleapis/google-cloud-go/issues/4482)) ([c1ba18b](https://www.github.com/googleapis/google-cloud-go/commit/c1ba18b1b1fc45de6e959cc22a5c222cc80433ee))
-
-
-### Bug Fixes
-
-* **spanner/spansql:** fix DATE and TIMESTAMP parsing. ([#4480](https://www.github.com/googleapis/google-cloud-go/issues/4480)) ([dec7a67](https://www.github.com/googleapis/google-cloud-go/commit/dec7a67a3e980f6f5e0d170919da87e1bffe923f))
-
-## [1.23.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.22.0...spanner/v1.23.0) (2021-07-08)
-
-
-### Features
-
-* **spanner/admin/database:** add leader_options to InstanceConfig and default_leader to Database ([7aa0e19](https://www.github.com/googleapis/google-cloud-go/commit/7aa0e195a5536dd060a1fca871bd3c6f946d935e))
-
-## [1.22.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.21.0...spanner/v1.22.0) (2021-06-30)
-
-
-### Features
-
-* **spanner:** support request and transaction tags ([#4336](https://www.github.com/googleapis/google-cloud-go/issues/4336)) ([f08c73a](https://www.github.com/googleapis/google-cloud-go/commit/f08c73a75e2d2a8b9a0b184179346cb97c82e9e5))
-* **spanner:** enable request options for batch read ([#4337](https://www.github.com/googleapis/google-cloud-go/issues/4337)) ([b9081c3](https://www.github.com/googleapis/google-cloud-go/commit/b9081c36ed6495a67f8e458ad884bdb8da5b7fbc))
-
-## [1.21.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.20.0...spanner/v1.21.0) (2021-06-23)
-
-
-### Miscellaneous Chores
-
-* **spanner:** trigger a release for low cost instance ([#4264](https://www.github.com/googleapis/google-cloud-go/issues/4264)) ([24c4451](https://www.github.com/googleapis/google-cloud-go/commit/24c4451404cdf4a83cc7a35ee1911d654d2ba132))
-
-## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.19.0...spanner/v1.20.0) (2021-06-08)
-
-
-### Features
-
-* **spanner:** add the support of optimizer statistics package ([#2717](https://www.github.com/googleapis/google-cloud-go/issues/2717)) ([29c7247](https://www.github.com/googleapis/google-cloud-go/commit/29c724771f0b19849c76e62d4bc8e9342922bf75))
-
-## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.18.0...spanner/v1.19.0) (2021-06-03)
-
-
-### Features
-
-* **spanner/spannertest:** support multiple aggregations ([#3965](https://www.github.com/googleapis/google-cloud-go/issues/3965)) ([1265dc3](https://www.github.com/googleapis/google-cloud-go/commit/1265dc3289693f79fcb9c5785a424eb510a50007))
-* **spanner/spansql:** case insensitive parsing of keywords and functions ([#4034](https://www.github.com/googleapis/google-cloud-go/issues/4034)) ([ddb09d2](https://www.github.com/googleapis/google-cloud-go/commit/ddb09d22a737deea0d0a9ab58cd5d337164bbbfe))
-* **spanner:** add a database name getter to client ([#4190](https://www.github.com/googleapis/google-cloud-go/issues/4190)) ([7fce29a](https://www.github.com/googleapis/google-cloud-go/commit/7fce29af404f0623b483ca6d6f2af4c726105fa6))
-* **spanner:** add custom instance config to tests ([#4194](https://www.github.com/googleapis/google-cloud-go/issues/4194)) ([e935345](https://www.github.com/googleapis/google-cloud-go/commit/e9353451237e658bde2e41b30e8270fbc5987b39))
-
-
-### Bug Fixes
-
-* **spanner:** add missing NUMERIC type to the doc for Row ([#4116](https://www.github.com/googleapis/google-cloud-go/issues/4116)) ([9a3b416](https://www.github.com/googleapis/google-cloud-go/commit/9a3b416221f3c8b3793837e2a459b1d7cd9c479f))
-* **spanner:** indent code example for Encoder and Decoder ([#4128](https://www.github.com/googleapis/google-cloud-go/issues/4128)) ([7c1f48f](https://www.github.com/googleapis/google-cloud-go/commit/7c1f48f307284c26c10cd5787dbc94136a2a36a6))
-* **spanner:** mark SessionPoolConfig.MaxBurst deprecated ([#4115](https://www.github.com/googleapis/google-cloud-go/issues/4115)) ([d60a686](https://www.github.com/googleapis/google-cloud-go/commit/d60a68649f85f1edfbd8f11673bb280813c2b771))
-
-## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.17.0...spanner/v1.18.0) (2021-04-29)
-
-
-### Features
-
-* **spanner/admin/database:** add `progress` field to `UpdateDatabaseDdlMetadata` ([9029071](https://www.github.com/googleapis/google-cloud-go/commit/90290710158cf63de918c2d790df48f55a23adc5))
-
-## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.16.0...spanner/v1.17.0) (2021-03-31)
-
-
-### Features
-
-* **spanner/admin/database:** add tagging request options ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7))
-* **spanner:** add RPC Priority request options ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3))
-* **spanner:** Add support for RPC priority ([#3341](https://www.github.com/googleapis/google-cloud-go/issues/3341)) ([88cf097](https://www.github.com/googleapis/google-cloud-go/commit/88cf097649f1cdf01cab531eabdff7fbf2be3f8f))
-
-## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/v1.15.0...v1.16.0) (2021-03-17)
-
-
-### Features
-
-* **spanner:** add `optimizer_statistics_package` field in `QueryOptions` ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d))
-* **spanner/admin/database:** add CMEK fields to backup and database ([16597fa](https://github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de))
-
-
-### Bug Fixes
-
-* **spanner/spansql:** fix parsing of NOT IN operator ([#3724](https://www.github.com/googleapis/google-cloud-go/issues/3724)) ([7636478](https://www.github.com/googleapis/google-cloud-go/commit/76364784d82073b80929ae60fd42da34c8050820))
-
-## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/v1.14.1...v1.15.0) (2021-02-24)
-
-
-### Features
-
-* **spanner/admin/database:** add CMEK fields to backup and database ([47037ed](https://www.github.com/googleapis/google-cloud-go/commit/47037ed33cd36edfff4ba7c4a4ea332140d5e67b))
-* **spanner/admin/database:** add CMEK fields to backup and database ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de))
-
-
-### Bug Fixes
-
-* **spanner:** parallelize session deletion when closing pool ([#3701](https://www.github.com/googleapis/google-cloud-go/issues/3701)) ([75ac7d2](https://www.github.com/googleapis/google-cloud-go/commit/75ac7d2506e706869ae41cf186b0c873b146e926)), refs [#3685](https://www.github.com/googleapis/google-cloud-go/issues/3685)
-
-### [1.14.1](https://www.github.com/googleapis/google-cloud-go/compare/v1.14.0...v1.14.1) (2021-02-09)
-
-
-### Bug Fixes
-
-* **spanner:** restore removed scopes ([#3684](https://www.github.com/googleapis/google-cloud-go/issues/3684)) ([232d3a1](https://www.github.com/googleapis/google-cloud-go/commit/232d3a17bdadb92864592351a335ec920a68f9bf))
-
-## [1.14.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.13.0...v1.14.0) (2021-02-09)
-
-
-### Features
-
-* **spanner/admin/database:** adds PITR fields to backup and database ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7))
-* **spanner/spannertest:** restructure column alteration implementation ([#3616](https://www.github.com/googleapis/google-cloud-go/issues/3616)) ([176400b](https://www.github.com/googleapis/google-cloud-go/commit/176400be9ab485fb343b8994bc49ac2291d8eea9))
-* **spanner/spansql:** add complete set of array functions ([#3633](https://www.github.com/googleapis/google-cloud-go/issues/3633)) ([13d50b9](https://www.github.com/googleapis/google-cloud-go/commit/13d50b93cc8348c54641b594371a96ecdb1bcabc))
-* **spanner/spansql:** add complete set of string functions ([#3625](https://www.github.com/googleapis/google-cloud-go/issues/3625)) ([34027ad](https://www.github.com/googleapis/google-cloud-go/commit/34027ada6a718603be2987b4084ce5e0ead6413c))
-* **spanner:** add option for returning Spanner commit stats ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f))
-* **spanner:** add option for returning Spanner commit stats ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582))
-* **spanner:** support CommitStats ([#3444](https://www.github.com/googleapis/google-cloud-go/issues/3444)) ([b7c3ca6](https://www.github.com/googleapis/google-cloud-go/commit/b7c3ca6c83cbdca95d734df8aa07c5ddb8ab3db0))
-
-
-### Bug Fixes
-
-* **spanner/spannertest:** support queries in ExecuteSql ([#3640](https://www.github.com/googleapis/google-cloud-go/issues/3640)) ([8eede84](https://www.github.com/googleapis/google-cloud-go/commit/8eede8411a5521f45a5c3f8091c42b3c5407ea90)), refs [#3639](https://www.github.com/googleapis/google-cloud-go/issues/3639)
-* **spanner/spansql:** fix SelectFromJoin behavior ([#3571](https://www.github.com/googleapis/google-cloud-go/issues/3571)) ([e0887c7](https://www.github.com/googleapis/google-cloud-go/commit/e0887c762a4c58f29b3e5b49ee163a36a065463c))
-
-## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.12.0...v1.13.0) (2021-01-15)
-
-
-### Features
-
-* **spanner/spannertest:** implement ANY_VALUE aggregation function ([#3428](https://www.github.com/googleapis/google-cloud-go/issues/3428)) ([e16c3e9](https://www.github.com/googleapis/google-cloud-go/commit/e16c3e9b412762b85483f3831ee586a5e6631313))
-* **spanner/spannertest:** implement FULL JOIN ([#3218](https://www.github.com/googleapis/google-cloud-go/issues/3218)) ([99f7212](https://www.github.com/googleapis/google-cloud-go/commit/99f7212bd70bb333c1aa1c7a57348b4dfd80d31b))
-* **spanner/spannertest:** implement SELECT ... FROM UNNEST(...) ([#3431](https://www.github.com/googleapis/google-cloud-go/issues/3431)) ([deb466f](https://www.github.com/googleapis/google-cloud-go/commit/deb466f497a1e6df78fcad57c3b90b1a4ccd93b4))
-* **spanner/spannertest:** support array literals ([#3438](https://www.github.com/googleapis/google-cloud-go/issues/3438)) ([69e0110](https://www.github.com/googleapis/google-cloud-go/commit/69e0110f4977035cd1a705c3034c3ba96cadf36f))
-* **spanner/spannertest:** support AVG aggregation function ([#3286](https://www.github.com/googleapis/google-cloud-go/issues/3286)) ([4788415](https://www.github.com/googleapis/google-cloud-go/commit/4788415c908f58c1cc08c951f1a7f17cdaf35aa2))
-* **spanner/spannertest:** support Not Null constraint ([#3491](https://www.github.com/googleapis/google-cloud-go/issues/3491)) ([c36aa07](https://www.github.com/googleapis/google-cloud-go/commit/c36aa0785e798b9339d540e691850ca3c474a288))
-* **spanner/spannertest:** support UPDATE DML ([#3201](https://www.github.com/googleapis/google-cloud-go/issues/3201)) ([1dec6f6](https://www.github.com/googleapis/google-cloud-go/commit/1dec6f6a31768a3f70bfec7274828301c22ea10b))
-* **spanner/spansql:** define structures and parse UPDATE DML statements ([#3192](https://www.github.com/googleapis/google-cloud-go/issues/3192)) ([23b6904](https://www.github.com/googleapis/google-cloud-go/commit/23b69042c58489df512703259f54d075ba0c0722))
-* **spanner/spansql:** support DATE and TIMESTAMP literals ([#3557](https://www.github.com/googleapis/google-cloud-go/issues/3557)) ([1961930](https://www.github.com/googleapis/google-cloud-go/commit/196193034a15f84dc3d3c27901990e8be77fca85))
-* **spanner/spansql:** support for parsing generated columns ([#3373](https://www.github.com/googleapis/google-cloud-go/issues/3373)) ([9b1d06f](https://www.github.com/googleapis/google-cloud-go/commit/9b1d06fc90a4c07899c641a893dba0b47a1cead9))
-* **spanner/spansql:** support NUMERIC data type ([#3411](https://www.github.com/googleapis/google-cloud-go/issues/3411)) ([1bc65d9](https://www.github.com/googleapis/google-cloud-go/commit/1bc65d9124ba22db5bec4c71b6378c27dfc04724))
-* **spanner:** Add a DirectPath fallback integration test ([#3487](https://www.github.com/googleapis/google-cloud-go/issues/3487)) ([de821c5](https://www.github.com/googleapis/google-cloud-go/commit/de821c59fb81e9946216d205162b59de8b5ce71c))
-* **spanner:** attempt DirectPath by default ([#3516](https://www.github.com/googleapis/google-cloud-go/issues/3516)) ([bbc61ed](https://www.github.com/googleapis/google-cloud-go/commit/bbc61ed368453b28aaf5bed627ca2499a3591f63))
-* **spanner:** include User agent ([#3465](https://www.github.com/googleapis/google-cloud-go/issues/3465)) ([4e1ef1b](https://www.github.com/googleapis/google-cloud-go/commit/4e1ef1b3fb536ef950249cdee02cc0b6c2b56e86))
-* **spanner:** run E2E test over DirectPath ([#3466](https://www.github.com/googleapis/google-cloud-go/issues/3466)) ([18e3a4f](https://www.github.com/googleapis/google-cloud-go/commit/18e3a4fe2a0c59c6295db2d85c7893ac51688083))
-* **spanner:** support NUMERIC in mutations ([#3328](https://www.github.com/googleapis/google-cloud-go/issues/3328)) ([fa90737](https://www.github.com/googleapis/google-cloud-go/commit/fa90737a2adbe0cefbaba4aa1046a6efbba2a0e9))
-
-
-### Bug Fixes
-
-* **spanner:** fix session leak ([#3461](https://www.github.com/googleapis/google-cloud-go/issues/3461)) ([11fb917](https://www.github.com/googleapis/google-cloud-go/commit/11fb91711db5b941995737980cef7b48b611fefd)), refs [#3460](https://www.github.com/googleapis/google-cloud-go/issues/3460)
-
-## [1.12.0](https://www.github.com/googleapis/google-cloud-go/compare/spanner/v1.11.0...v1.12.0) (2020-11-10)
-
-
-### Features
-
-* **spanner:** add metadata to RowIterator ([#3050](https://www.github.com/googleapis/google-cloud-go/issues/3050)) ([9a2289c](https://www.github.com/googleapis/google-cloud-go/commit/9a2289c3a38492bc2e84e0f4000c68a8718f5c11)), closes [#1805](https://www.github.com/googleapis/google-cloud-go/issues/1805)
-* **spanner:** export ToSpannerError ([#3133](https://www.github.com/googleapis/google-cloud-go/issues/3133)) ([b951d8b](https://www.github.com/googleapis/google-cloud-go/commit/b951d8bd194b76da0a8bf2ce7cf85b546d2e051c)), closes [#3122](https://www.github.com/googleapis/google-cloud-go/issues/3122)
-* **spanner:** support rw-transaction with options ([#3058](https://www.github.com/googleapis/google-cloud-go/issues/3058)) ([5130694](https://www.github.com/googleapis/google-cloud-go/commit/51306948eef9d26cff70453efc3eb500ddef9117))
-* **spanner/spannertest:** make SELECT list aliases visible to ORDER BY ([#3054](https://www.github.com/googleapis/google-cloud-go/issues/3054)) ([7d2d83e](https://www.github.com/googleapis/google-cloud-go/commit/7d2d83ee1cce58d4014d5570bc599bcef1ed9c22)), closes [#3043](https://www.github.com/googleapis/google-cloud-go/issues/3043)
-
-## v1.11.0
-
-* Features:
- - feat(spanner): add KeySetFromKeys function (#2837)
-* Misc:
- - test(spanner): check for Aborted error (#3039)
- - test(spanner): fix potential race condition in TestRsdBlockingStates (#3017)
- - test(spanner): compare data instead of struct (#3013)
- - test(spanner): fix flaky oc_test.go (#2838)
- - docs(spanner): document NULL value (#2885)
-* spansql/spannertest:
- - Support JOINs (all but FULL JOIN) (#2936, #2924, #2896, #3042, #3037, #2995, #2945, #2931)
- - feat(spanner/spansql): parse CHECK constraints (#3046)
- - fix(spanner/spansql): fix parsing of unary minus and plus (#2997)
- - fix(spanner/spansql): fix parsing of adjacent inline and leading comments (#2851)
- - fix(spanner/spannertest): fix ORDER BY combined with SELECT aliases (#3043)
- - fix(spanner/spannertest): generate query output columns in construction order (#2990)
- - fix(spanner/spannertest): correct handling of NULL AND FALSE (#2991)
- - fix(spanner/spannertest): correct handling of tri-state boolean expression evaluation (#2983)
- - fix(spanner/spannertest): fix handling of NULL with LIKE operator (#2982)
- - test(spanner/spannertest): migrate most test code to integration_test.go (#2977)
- - test(spanner/spansql): add fuzz target for ParseQuery (#2909)
- - doc(spanner/spannertest): document the implementation (#2996)
- - perf(spanner/spannertest): speed up no-wait DDL changes (#2994)
- - perf(spanner/spansql): make fewer allocations during SQL (#2969)
-* Backward Incompatible Changes
- - chore(spanner/spansql): use ID type for identifiers throughout (#2889)
- - chore(spanner/spansql): restructure FROM, TABLESAMPLE (#2888)
-
-## v1.10.0
-
-* feat(spanner): add support for NUMERIC data type (#2415)
-* feat(spanner): add custom type support to spanner.Key (#2748)
-* feat(spanner/spannertest): add support for bool parameter types (#2674)
-* fix(spanner): update PDML to take sessions from pool (#2736)
-* spanner/spansql: update docs on TableAlteration, ColumnAlteration (#2825)
-* spanner/spannertest: support dropping columns (#2823)
-* spanner/spannertest: implement GetDatabase (#2802)
-* spanner/spannertest: fix aggregation in query evaluation for empty inputs (#2803)
-
-## v1.9.0
-
-* Features:
- - feat(spanner): support custom field type (#2614)
-* Bugfixes:
- - fix(spanner): call ctx.cancel after stats have been recorded (#2728)
- - fix(spanner): retry session not found for read (#2724)
- - fix(spanner): specify credentials with SPANNER_EMULATOR_HOST (#2701)
- - fix(spanner): update pdml to retry EOS internal error (#2678)
-* Misc:
- - test(spanner): unskip tests for emulator (#2675)
-* spansql/spannertest:
- - spanner/spansql: restructure types and parsing for column options (#2656)
- - spanner/spannertest: return error for Read with no keys (#2655)
-
-## v1.8.0
-
-* Features:
- - feat(spanner): support of client-level custom retry settings (#2599)
- - feat(spanner): add a statement-based way to run read-write transaction. (#2545)
-* Bugfixes:
- - fix(spanner): set 'gccl' to the request header. (#2609)
- - fix(spanner): add the missing resource prefix (#2605)
- - fix(spanner): fix the upgrade of protobuf. (#2583)
- - fix(spanner): do not copy protobuf messages by value. (#2581)
- - fix(spanner): fix the required resource prefix. (#2580)
- - fix(spanner): add extra field to ignore with cmp (#2577)
- - fix(spanner): remove appengine-specific numChannels. (#2513)
-* Misc:
- - test(spanner): log warning instead of fail for stress test (#2559)
- - test(spanner): fix failed TestRsdBlockingStates test (#2597)
- - chore(spanner): cleanup mockserver and mockclient (#2414)
-
-## v1.7.0
-
-* Retry:
- - Only retry certain types of internal errors. (#2460)
-* Tracing/metrics:
- - Never sample `ping()` trace spans (#2520)
- - Add oc tests for session pool metrics. (#2416)
-* Encoding:
- - Allow encoding struct with custom types to mutation (#2529)
-* spannertest:
- - Fix evaluation on IN (#2479)
- - Support MIN/MAX aggregation functions (#2411)
-* Misc:
- - Fix TestClient_WithGRPCConnectionPoolAndNumChannels_Misconfigured test (#2539)
- - Cleanup backoff files and rename a variable (#2526)
- - Fix TestIntegration_DML test to return err from tx (#2509)
- - Unskip tests for emulator 0.8.0. (#2494)
- - Fix TestIntegration_StartBackupOperation test. (#2418)
- - Fix flakiness in TestIntegration_BatchDML_Error
- - Unskip TestIntegration_BatchDML and TestIntegration_BatchDML_TwoStatements
- for emulator by checking the existence of status.
- - Fix TestStressSessionPool test by taking lock while getting sessions from
- hc.
-
-## v1.6.0
-
-* Sessions:
- - Increase the number of sessions in batches instead of one by one when
- additional sessions are needed. The step size is set to 25, which means
- that whenever the session pool needs at least one more session, it will
- create a batch of 25 sessions.
-* Emulator:
- - Run integration tests against the emulator in Kokoro Presubmit.
-* RPC retrying:
- - Retry CreateDatabase on retryable codes.
-* spannertest:
- - Change internal representation of DATE/TIMESTAMP values.
-* spansql:
- - Cleanly parse adjacent comment marker/terminator.
- - Support FROM aliases in SELECT statements.
-* Misc:
- - Fix comparing errors in tests.
- - Fix flaky session pool test.
- - Increase timeout in TestIntegration_ReadOnlyTransaction.
- - Fix incorrect instance IDs when deleting instances in tests.
- - Clean up test instances.
-  - Clarify docs on Aborted transaction.
- - Fix timeout+staleness bound for test
- - Remove the support for resource-based routing.
- - Fix TestTransaction_SessionNotFound test.
-
-## v1.5.1
-
-* Fix incorrect decreasing metrics, numReads and numWrites.
-* Fix an issue that XXX fields/methods are internal to proto and may change
- at any time. XXX_Merge panics in proto v1.4.0. Use proto.Merge instead of
- XXX_Merge.
-* spannertest: handle list parameters in RPC interfaces.
-
-## v1.5.0
-
-* Metrics
- - Instrument client library with adding OpenCensus metrics. This allows for
- better monitoring of the session pool.
-* Session management
- - Switch the session keepalive method from GetSession to SELECT 1.
-* Emulator
- - Use client hooks for admin clients running against an emulator. With
- this change, users can use SPANNER_EMULATOR_HOST for initializing admin
- clients when running against an emulator.
-* spansql
- - Add space between constraint name and foreign key def.
-* Misc
- - Fix segfault when a non-existent credentials file had been specified.
- - Fix cleaning up instances in integration tests.
- - Fix race condition in batch read-only transaction.
- - Fix the flaky TestLIFOTakeWriteSessionOrder test.
- - Fix ITs to order results in SELECT queries.
- - Fix the documentation of timestamp bounds.
- - Fix the regex issue in managing backups.
-
-## v1.4.0
-
-- Support managed backups. This includes the API methods for CreateBackup,
- GetBackup, UpdateBackup, DeleteBackup and others. Also includes a simple
- wrapper in DatabaseAdminClient to create a backup.
-- Update the healthcheck interval. The default interval is updated to 50 mins.
- By default, the first healthcheck is scheduled between 10 and 55 mins and
- the subsequent healthchecks are between 45 and 55 mins. This update avoids
- overloading the backend service with frequent healthchecking.
-
-## v1.3.0
-
-* Query options:
- - Adds the support of providing query options (optimizer version) via
- three ways (precedence follows the order):
- `client-level < environment variables < query-level`. The environment
- variable is set by "SPANNER_OPTIMIZER_VERSION".
-* Connection pooling:
- - Use the new connection pooling in gRPC. This change deprecates
- `ClientConfig.numChannels` and users should move to
- `WithGRPCConnectionPool(numChannels)` at their earliest convenience.
- Example:
- ```go
- // numChannels (deprecated):
- err, client := NewClientWithConfig(ctx, database, ClientConfig{NumChannels: 8})
-
- // gRPC connection pool:
- err, client := NewClientWithConfig(ctx, database, ClientConfig{}, option.WithGRPCConnectionPool(8))
- ```
-* Error handling:
- - Do not rollback after failed commit.
- - Return TransactionOutcomeUnknownError if a DEADLINE_EXCEEDED or CANCELED
- error occurs while a COMMIT request is in flight.
-* spansql:
- - Added support for IN expressions and OFFSET clauses.
- - Fixed parsing of table constraints.
- - Added support for foreign key constraints in ALTER TABLE and CREATE TABLE.
- - Added support for GROUP BY clauses.
-* spannertest:
- - Added support for IN expressions and OFFSET clauses.
- - Added support for GROUP BY clauses.
- - Fixed data race in query execution.
- - No longer rejects reads specifying an index to use.
- - Return last commit timestamp as read timestamp when requested.
-  - Evaluate add, subtract, multiply, divide, unary
-    negation, unary not, bitwise and/xor/or operations, as well as reporting
-    column types for expressions involving any possible arithmetic
-    operator.
- - Fixed handling of descending primary keys.
-* Misc:
- - Change default healthcheck interval to 30 mins to reduce the GetSession
- calls made to the backend.
- - Add marshal/unmarshal json for nullable types to support NullString,
- NullInt64, NullFloat64, NullBool, NullTime, NullDate.
- - Use ResourceInfo to extract error.
- - Extract retry info from status.
-
-## v1.2.1
-
-- Fix session leakage for ApplyAtLeastOnce. Previously session handles were
- leaked whenever Commit() returned a non-abort, non-session-not-found error,
- due to a missing recycle() call.
-- Fix error for WriteStruct with pointers. This fixes a specific check for
- encoding and decoding to pointer types.
-- Fix a GRPCStatus issue that returns a Status that has Unknown code if the
- base error is nil. Now, it always returns a Status based on Code field of
- current error.
-
-## v1.2.0
-
-- Support tracking stacktrace of sessionPool.take() that allows the user
- to instruct the session pool to keep track of the stacktrace of each
- goroutine that checks out a session from the pool. This is disabled by
- default, but it can be enabled by setting
- `SessionPoolConfig.TrackSessionHandles: true`.
-- Add resource-based routing that includes a step to retrieve the
- instance-specific endpoint before creating the session client when
- creating a new spanner client. This is disabled by default, but it can
- be enabled by setting `GOOGLE_CLOUD_SPANNER_ENABLE_RESOURCE_BASED_ROUTING`.
-- Make logger configurable so that the Spanner client can now be configured to
- use a specific logger instead of the standard logger.
-- Support encoding custom types that point back to supported basic types.
-- Allow decoding Spanner values to custom types that point back to supported
- types.
-
-## v1.1.0
-
-- The String() method of NullString, NullTime and NullDate will now return
- an unquoted string instead of a quoted string. This is a BREAKING CHANGE.
- If you relied on the old behavior, please use fmt.Sprintf("%q", T).
-- The Spanner client will now use the new BatchCreateSessions RPC to initialize
- the session pool. This will improve the startup time of clients that are
- initialized with a minimum number of sessions greater than zero
- (i.e. SessionPoolConfig.MinOpened>0).
-- Spanner clients that are created with the NewClient method will now default
- to a minimum of 100 opened sessions in the pool
- (i.e. SessionPoolConfig.MinOpened=100). This will improve the performance
- of the first transaction/query that is executed by an application, as a
- session will normally not have to be created as part of the transaction.
- Spanner clients that are created with the NewClientWithConfig method are
- not affected by this change.
-- Spanner clients that are created with the NewClient method will now default
- to a write sessions fraction of 0.2 in the pool
- (i.e. SessionPoolConfig.WriteSessions=0.2).
- Spanner clients that are created with the NewClientWithConfig method are
- not affected by this change.
-- The session pool maintenance worker has been improved so it keeps better
- track of the actual number of sessions needed. It will now less often delete
- and re-create sessions. This can improve the overall performance of
- applications with a low transaction rate.
-
-## v1.0.0
-
-This is the first tag to carve out spanner as its own module. See:
-https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
diff --git a/vendor/cloud.google.com/go/spanner/LICENSE b/vendor/cloud.google.com/go/spanner/LICENSE
deleted file mode 100644
index d64569567..000000000
--- a/vendor/cloud.google.com/go/spanner/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/cloud.google.com/go/spanner/README.md b/vendor/cloud.google.com/go/spanner/README.md
deleted file mode 100644
index 04757e9b2..000000000
--- a/vendor/cloud.google.com/go/spanner/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-## Cloud Spanner [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/spanner.svg)](https://pkg.go.dev/cloud.google.com/go/spanner)
-
-- [About Cloud Spanner](https://cloud.google.com/spanner/)
-- [API documentation](https://cloud.google.com/spanner/docs)
-- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/spanner)
-
-### Example Usage
-
-First create a `spanner.Client` to use throughout your application:
-
-[snip]:# (spanner-1)
-```go
-client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
-if err != nil {
- log.Fatal(err)
-}
-```
-
-[snip]:# (spanner-2)
-```go
-// Simple Reads And Writes
-_, err = client.Apply(ctx, []*spanner.Mutation{
- spanner.Insert("Users",
- []string{"name", "email"},
- []interface{}{"alice", "a@example.com"})})
-if err != nil {
- log.Fatal(err)
-}
-row, err := client.Single().ReadRow(ctx, "Users",
- spanner.Key{"alice"}, []string{"email"})
-if err != nil {
- log.Fatal(err)
-}
-```
-
-### Session Leak
-A `Client` object of the Client Library has a limit on the number of maximum sessions. For example the
-default value of `MaxOpened`, which is the maximum number of sessions allowed by the session pool in the
-Golang Client Library, is 400. You can configure these values at the time of
-creating a `Client` by passing custom `SessionPoolConfig` as part of `ClientConfig`. When all the sessions are checked
-out of the session pool, every new transaction has to wait until a session is returned to the pool.
-If a session is never returned to the pool (hence causing a session leak), the transactions will have to wait
-indefinitely and your application will be blocked.
-
-#### Common Root Causes
-The most common reason for session leaks in the Golang client library are:
-1. Not stopping a `RowIterator` that is returned by `Query`, `Read` and other methods. Always use `RowIterator.Stop()` to ensure that the `RowIterator` is always closed.
-2. Not closing a `ReadOnlyTransaction` when you no longer need it. Always call `ReadOnlyTransaction.Close()` after use, to ensure that the `ReadOnlyTransaction` is always closed.
-
-As shown in the example below, the `txn.Close()` statement releases the session after it is complete.
-If you fail to call `txn.Close()`, the session is not released back to the pool. The recommended way is to use `defer` as shown below.
-```go
-client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
-if err != nil {
- log.Fatal(err)
-}
-txn := client.ReadOnlyTransaction()
-defer txn.Close()
-```
-
-#### Debugging and Resolving Session Leaks
-
-##### Logging inactive transactions
-This option logs warnings when you have exhausted >95% of your session pool. It is enabled by default.
-This could mean two things; either you need to increase the max sessions in your session pool (as the number
-of queries run using the client side database object is greater than your session pool can serve), or you may
-have a session leak. To help debug which transactions may be causing this session leak, the logs will also contain stack traces of
-transactions which have been running longer than expected if `TrackSessionHandles` under `SessionPoolConfig` is enabled.
-
-```go
-sessionPoolConfig := spanner.SessionPoolConfig{
- TrackSessionHandles: true,
- InactiveTransactionRemovalOptions: spanner.InactiveTransactionRemovalOptions{
- ActionOnInactiveTransaction: spanner.Warn,
- },
-}
-client, err := spanner.NewClientWithConfig(
- ctx, database, spanner.ClientConfig{SessionPoolConfig: sessionPoolConfig},
-)
-if err != nil {
- log.Fatal(err)
-}
-defer client.Close()
-
-// Example Log message to warn presence of long running transactions
-// session <session-info> checked out of pool at <session-checkout-time> is long running due to possible session leak for goroutine
-// <Stack Trace of transaction>
-
-```
-
-##### Automatically clean inactive transactions
-When the option to automatically clean inactive transactions is enabled, the client library will automatically detect
-problematic transactions that are running for a very long time (thus causing session leaks) and close them.
-The session will be removed from the pool and be replaced by a new session. To dig deeper into which transactions are being
-closed, you can check the logs to see the stack trace of the transactions which might be causing these leaks and further
-debug them.
-
-```go
-sessionPoolConfig := spanner.SessionPoolConfig{
- TrackSessionHandles: true,
- InactiveTransactionRemovalOptions: spanner.InactiveTransactionRemovalOptions{
- ActionOnInactiveTransaction: spanner.WarnAndClose,
- },
-}
-client, err := spanner.NewClientWithConfig(
- ctx, database, spanner.ClientConfig{SessionPoolConfig: sessionPoolConfig},
-)
-if err != nil {
-log.Fatal(err)
-}
-defer client.Close()
-
-// Example Log message for when transaction is recycled
-// session <session-info> checked out of pool at <session-checkout-time> is long running and will be removed due to possible session leak for goroutine
-// <Stack Trace of transaction>
-``` \ No newline at end of file
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go
deleted file mode 100644
index 1c8385f86..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/auxiliary.go
+++ /dev/null
@@ -1,636 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package database
-
-import (
- "context"
- "time"
-
- "cloud.google.com/go/longrunning"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- databasepb "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/iterator"
-)
-
-// CopyBackupOperation manages a long-running operation from CopyBackup.
-type CopyBackupOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CopyBackupOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Backup
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CopyBackupOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Backup
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CopyBackupOperation) Metadata() (*databasepb.CopyBackupMetadata, error) {
- var meta databasepb.CopyBackupMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CopyBackupOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CopyBackupOperation) Name() string {
- return op.lro.Name()
-}
-
-// CreateBackupOperation manages a long-running operation from CreateBackup.
-type CreateBackupOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateBackupOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Backup
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateBackupOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Backup, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Backup
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateBackupOperation) Metadata() (*databasepb.CreateBackupMetadata, error) {
- var meta databasepb.CreateBackupMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateBackupOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateBackupOperation) Name() string {
- return op.lro.Name()
-}
-
-// CreateDatabaseOperation manages a long-running operation from CreateDatabase.
-type CreateDatabaseOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Database
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Database
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateDatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) {
- var meta databasepb.CreateDatabaseMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateDatabaseOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateDatabaseOperation) Name() string {
- return op.lro.Name()
-}
-
-// RestoreDatabaseOperation manages a long-running operation from RestoreDatabase.
-type RestoreDatabaseOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *RestoreDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Database
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *RestoreDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Database
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *RestoreDatabaseOperation) Metadata() (*databasepb.RestoreDatabaseMetadata, error) {
- var meta databasepb.RestoreDatabaseMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *RestoreDatabaseOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *RestoreDatabaseOperation) Name() string {
- return op.lro.Name()
-}
-
-// UpdateDatabaseDdlOperation manages a long-running operation from UpdateDatabaseDdl.
-type UpdateDatabaseDdlOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *UpdateDatabaseDdlOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- return op.lro.WaitWithInterval(ctx, nil, time.Minute, opts...)
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *UpdateDatabaseDdlOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- return op.lro.Poll(ctx, nil, opts...)
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *UpdateDatabaseDdlOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) {
- var meta databasepb.UpdateDatabaseDdlMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *UpdateDatabaseDdlOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *UpdateDatabaseDdlOperation) Name() string {
- return op.lro.Name()
-}
-
-// UpdateDatabaseOperation manages a long-running operation from UpdateDatabase.
-type UpdateDatabaseOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *UpdateDatabaseOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Database
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *UpdateDatabaseOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*databasepb.Database, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp databasepb.Database
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *UpdateDatabaseOperation) Metadata() (*databasepb.UpdateDatabaseMetadata, error) {
- var meta databasepb.UpdateDatabaseMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *UpdateDatabaseOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *UpdateDatabaseOperation) Name() string {
- return op.lro.Name()
-}
-
-// BackupIterator manages a stream of *databasepb.Backup.
-type BackupIterator struct {
- items []*databasepb.Backup
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Backup, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *BackupIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *BackupIterator) Next() (*databasepb.Backup, error) {
- var item *databasepb.Backup
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *BackupIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *BackupIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// BackupScheduleIterator manages a stream of *databasepb.BackupSchedule.
-type BackupScheduleIterator struct {
- items []*databasepb.BackupSchedule
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*databasepb.BackupSchedule, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *BackupScheduleIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *BackupScheduleIterator) Next() (*databasepb.BackupSchedule, error) {
- var item *databasepb.BackupSchedule
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *BackupScheduleIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *BackupScheduleIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// DatabaseIterator manages a stream of *databasepb.Database.
-type DatabaseIterator struct {
- items []*databasepb.Database
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DatabaseIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *DatabaseIterator) Next() (*databasepb.Database, error) {
- var item *databasepb.Database
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *DatabaseIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *DatabaseIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// DatabaseRoleIterator manages a stream of *databasepb.DatabaseRole.
-type DatabaseRoleIterator struct {
- items []*databasepb.DatabaseRole
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*databasepb.DatabaseRole, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DatabaseRoleIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *DatabaseRoleIterator) Next() (*databasepb.DatabaseRole, error) {
- var item *databasepb.DatabaseRole
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *DatabaseRoleIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *DatabaseRoleIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// OperationIterator manages a stream of *longrunningpb.Operation.
-type OperationIterator struct {
- items []*longrunningpb.Operation
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *OperationIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
- var item *longrunningpb.Operation
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *OperationIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *OperationIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go
deleted file mode 100644
index 648c7f88e..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/backup.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
-Copyright 2020 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package database
-
-import (
- "context"
- "fmt"
- "regexp"
- "time"
-
- "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
- "github.com/googleapis/gax-go/v2"
- pbt "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-var (
- validDBPattern = regexp.MustCompile("^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/databases/(?P<database>[^/]+)$")
-)
-
-// StartBackupOperation creates a backup of the given database. It will be stored
-// as projects/<project>/instances/<instance>/backups/<backupID>. The
-// backup will be automatically deleted by Cloud Spanner after its expiration.
-//
-// backupID must be unique across an instance.
-//
-// expireTime is the time the backup will expire. It is respected to
-// microsecond granularity.
-//
-// databasePath must have the form
-// projects/<project>/instances/<instance>/databases/<database>.
-func (c *DatabaseAdminClient) StartBackupOperation(ctx context.Context, backupID string, databasePath string, expireTime time.Time, opts ...gax.CallOption) (*CreateBackupOperation, error) {
- m := validDBPattern.FindStringSubmatch(databasePath)
- if m == nil {
- return nil, fmt.Errorf("database name %q should conform to pattern %q",
- databasePath, validDBPattern)
- }
- ts := &pbt.Timestamp{Seconds: expireTime.Unix(), Nanos: int32(expireTime.Nanosecond())}
- // Create request from parameters.
- req := &databasepb.CreateBackupRequest{
- Parent: fmt.Sprintf("projects/%s/instances/%s", m[1], m[2]),
- BackupId: backupID,
- Backup: &databasepb.Backup{
- Database: databasePath,
- ExpireTime: ts,
- },
- }
- return c.CreateBackup(ctx, req, opts...)
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go
deleted file mode 100644
index f68a4cb65..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package database
-
-import (
- "context"
- "fmt"
- "regexp"
- "strings"
- "time"
- "unicode"
-
- "cloud.google.com/go/longrunning/autogen/longrunningpb"
- "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
- "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/iterator"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var retryer = gax.OnCodes(
- []codes.Code{codes.DeadlineExceeded, codes.Unavailable},
- gax.Backoff{Initial: time.Millisecond, Max: time.Millisecond, Multiplier: 1.0},
-)
-
-// CreateDatabaseWithRetry creates a new database and retries the call if the
-// backend returns a retryable error. The actual CreateDatabase RPC is only
-// retried if the initial call did not reach the server. In other cases, the
-// client will query the backend for the long-running operation that was
-// created by the initial RPC and return that operation.
-func (c *DatabaseAdminClient) CreateDatabaseWithRetry(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
- for {
- db, createErr := c.CreateDatabase(ctx, req, opts...)
- if createErr == nil {
- return db, nil
- }
- // Failed, check whether we should retry.
- delay, shouldRetry := retryer.Retry(createErr)
- if !shouldRetry {
- return nil, createErr
- }
- if err := gax.Sleep(ctx, delay); err != nil {
- return nil, err
- }
- // Extract the name of the database.
- dbName := extractDBName(req.CreateStatement)
- // Query the backend for any corresponding long-running operation to
- // determine whether we should retry the RPC or not.
- iter := c.ListDatabaseOperations(ctx, &databasepb.ListDatabaseOperationsRequest{
- Parent: req.Parent,
- Filter: fmt.Sprintf("(metadata.@type:type.googleapis.com/google.spanner.admin.database.v1.CreateDatabaseMetadata) AND (name:%s/databases/%s/operations/)", req.Parent, dbName),
- }, opts...)
- var mostRecentOp *longrunningpb.Operation
- for {
- op, err := iter.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- return nil, err
- }
- // A running operation is the most recent and should be returned.
- if !op.Done {
- return c.CreateDatabaseOperation(op.Name), nil
- }
- if op.GetError() == nil {
- mostRecentOp = op
- }
- }
- if mostRecentOp == nil {
- continue
- }
- // Only finished operations found. Check whether the database exists.
- _, getErr := c.GetDatabase(ctx, &databasepb.GetDatabaseRequest{
- Name: fmt.Sprintf("%s/databases/%s", req.Parent, dbName),
- })
- if getErr == nil {
- // Database found, return one of the long-running operations that
- // has finished, which again should return the database.
- return c.CreateDatabaseOperation(mostRecentOp.Name), nil
- }
- if status.Code(getErr) == codes.NotFound {
- continue
- }
- // Error getting the database that was not NotFound.
- return nil, getErr
- }
-}
-
-var dbNameRegEx = regexp.MustCompile("\\s*CREATE\\s+DATABASE\\s+(.+)\\s*")
-
-// extractDBName extracts the database name from a valid CREATE DATABASE <db>
-// statement. We don't have to worry about invalid create statements, as those
-// should already have been handled by the backend and should return a non-
-// retryable error.
-func extractDBName(createStatement string) string {
- if dbNameRegEx.MatchString(createStatement) {
- namePossiblyWithQuotes := strings.TrimRightFunc(dbNameRegEx.FindStringSubmatch(createStatement)[1], unicode.IsSpace)
- if len(namePossiblyWithQuotes) > 0 && namePossiblyWithQuotes[0] == '`' {
- if len(namePossiblyWithQuotes) > 5 && namePossiblyWithQuotes[1] == '`' && namePossiblyWithQuotes[2] == '`' {
- return string(namePossiblyWithQuotes[3 : len(namePossiblyWithQuotes)-3])
- }
- return string(namePossiblyWithQuotes[1 : len(namePossiblyWithQuotes)-1])
- }
- return string(namePossiblyWithQuotes)
- }
- return ""
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
deleted file mode 100644
index 1af1d1ed5..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
+++ /dev/null
@@ -1,4194 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package database
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "time"
-
- iampb "cloud.google.com/go/iam/apiv1/iampb"
- "cloud.google.com/go/longrunning"
- lroauto "cloud.google.com/go/longrunning/autogen"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- databasepb "cloud.google.com/go/spanner/admin/database/apiv1/databasepb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- httptransport "google.golang.org/api/transport/http"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-var newDatabaseAdminClientHook clientHook
-
-// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient.
-type DatabaseAdminCallOptions struct {
- ListDatabases []gax.CallOption
- CreateDatabase []gax.CallOption
- GetDatabase []gax.CallOption
- UpdateDatabase []gax.CallOption
- UpdateDatabaseDdl []gax.CallOption
- DropDatabase []gax.CallOption
- GetDatabaseDdl []gax.CallOption
- SetIamPolicy []gax.CallOption
- GetIamPolicy []gax.CallOption
- TestIamPermissions []gax.CallOption
- CreateBackup []gax.CallOption
- CopyBackup []gax.CallOption
- GetBackup []gax.CallOption
- UpdateBackup []gax.CallOption
- DeleteBackup []gax.CallOption
- ListBackups []gax.CallOption
- RestoreDatabase []gax.CallOption
- ListDatabaseOperations []gax.CallOption
- ListBackupOperations []gax.CallOption
- ListDatabaseRoles []gax.CallOption
- CreateBackupSchedule []gax.CallOption
- GetBackupSchedule []gax.CallOption
- UpdateBackupSchedule []gax.CallOption
- DeleteBackupSchedule []gax.CallOption
- ListBackupSchedules []gax.CallOption
- CancelOperation []gax.CallOption
- DeleteOperation []gax.CallOption
- GetOperation []gax.CallOption
- ListOperations []gax.CallOption
-}
-
-func defaultDatabaseAdminGRPCClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("spanner.googleapis.com:443"),
- internaloption.WithDefaultEndpointTemplate("spanner.UNIVERSE_DOMAIN:443"),
- internaloption.WithDefaultMTLSEndpoint("spanner.mtls.googleapis.com:443"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableJwtWithScope(),
- option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32))),
- }
-}
-
-func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions {
- return &DatabaseAdminCallOptions{
- ListDatabases: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- CreateDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- GetDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- UpdateDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- UpdateDatabaseDdl: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- DropDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetDatabaseDdl: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- SetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- GetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- TestIamPermissions: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- CreateBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- CopyBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- GetBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- UpdateBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- DeleteBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListBackups: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- RestoreDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- ListDatabaseOperations: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListBackupOperations: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListDatabaseRoles: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- CreateBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- UpdateBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- DeleteBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListBackupSchedules: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- CancelOperation: []gax.CallOption{},
- DeleteOperation: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
- ListOperations: []gax.CallOption{},
- }
-}
-
-func defaultDatabaseAdminRESTCallOptions() *DatabaseAdminCallOptions {
- return &DatabaseAdminCallOptions{
- ListDatabases: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- CreateDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- GetDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- UpdateDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- UpdateDatabaseDdl: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- DropDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- GetDatabaseDdl: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- SetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- GetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- TestIamPermissions: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- CreateBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- CopyBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- GetBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- UpdateBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- DeleteBackup: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ListBackups: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- RestoreDatabase: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- ListDatabaseOperations: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ListBackupOperations: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ListDatabaseRoles: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- CreateBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- GetBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- UpdateBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- DeleteBackupSchedule: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ListBackupSchedules: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- CancelOperation: []gax.CallOption{},
- DeleteOperation: []gax.CallOption{},
- GetOperation: []gax.CallOption{},
- ListOperations: []gax.CallOption{},
- }
-}
-
-// internalDatabaseAdminClient is an interface that defines the methods available from Cloud Spanner API.
-type internalDatabaseAdminClient interface {
- Close() error
- setGoogleClientInfo(...string)
- Connection() *grpc.ClientConn
- ListDatabases(context.Context, *databasepb.ListDatabasesRequest, ...gax.CallOption) *DatabaseIterator
- CreateDatabase(context.Context, *databasepb.CreateDatabaseRequest, ...gax.CallOption) (*CreateDatabaseOperation, error)
- CreateDatabaseOperation(name string) *CreateDatabaseOperation
- GetDatabase(context.Context, *databasepb.GetDatabaseRequest, ...gax.CallOption) (*databasepb.Database, error)
- UpdateDatabase(context.Context, *databasepb.UpdateDatabaseRequest, ...gax.CallOption) (*UpdateDatabaseOperation, error)
- UpdateDatabaseOperation(name string) *UpdateDatabaseOperation
- UpdateDatabaseDdl(context.Context, *databasepb.UpdateDatabaseDdlRequest, ...gax.CallOption) (*UpdateDatabaseDdlOperation, error)
- UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation
- DropDatabase(context.Context, *databasepb.DropDatabaseRequest, ...gax.CallOption) error
- GetDatabaseDdl(context.Context, *databasepb.GetDatabaseDdlRequest, ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error)
- SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
- CreateBackup(context.Context, *databasepb.CreateBackupRequest, ...gax.CallOption) (*CreateBackupOperation, error)
- CreateBackupOperation(name string) *CreateBackupOperation
- CopyBackup(context.Context, *databasepb.CopyBackupRequest, ...gax.CallOption) (*CopyBackupOperation, error)
- CopyBackupOperation(name string) *CopyBackupOperation
- GetBackup(context.Context, *databasepb.GetBackupRequest, ...gax.CallOption) (*databasepb.Backup, error)
- UpdateBackup(context.Context, *databasepb.UpdateBackupRequest, ...gax.CallOption) (*databasepb.Backup, error)
- DeleteBackup(context.Context, *databasepb.DeleteBackupRequest, ...gax.CallOption) error
- ListBackups(context.Context, *databasepb.ListBackupsRequest, ...gax.CallOption) *BackupIterator
- RestoreDatabase(context.Context, *databasepb.RestoreDatabaseRequest, ...gax.CallOption) (*RestoreDatabaseOperation, error)
- RestoreDatabaseOperation(name string) *RestoreDatabaseOperation
- ListDatabaseOperations(context.Context, *databasepb.ListDatabaseOperationsRequest, ...gax.CallOption) *OperationIterator
- ListBackupOperations(context.Context, *databasepb.ListBackupOperationsRequest, ...gax.CallOption) *OperationIterator
- ListDatabaseRoles(context.Context, *databasepb.ListDatabaseRolesRequest, ...gax.CallOption) *DatabaseRoleIterator
- CreateBackupSchedule(context.Context, *databasepb.CreateBackupScheduleRequest, ...gax.CallOption) (*databasepb.BackupSchedule, error)
- GetBackupSchedule(context.Context, *databasepb.GetBackupScheduleRequest, ...gax.CallOption) (*databasepb.BackupSchedule, error)
- UpdateBackupSchedule(context.Context, *databasepb.UpdateBackupScheduleRequest, ...gax.CallOption) (*databasepb.BackupSchedule, error)
- DeleteBackupSchedule(context.Context, *databasepb.DeleteBackupScheduleRequest, ...gax.CallOption) error
- ListBackupSchedules(context.Context, *databasepb.ListBackupSchedulesRequest, ...gax.CallOption) *BackupScheduleIterator
- CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error
- DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error
- GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error)
- ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator
-}
-
-// DatabaseAdminClient is a client for interacting with Cloud Spanner API.
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-//
-// # Cloud Spanner Database Admin API
-//
-// The Cloud Spanner Database Admin API can be used to:
-//
-// create, drop, and list databases
-//
-// update the schema of pre-existing databases
-//
-// create, delete, copy and list backups for a database
-//
-// restore a database from an existing backup
-type DatabaseAdminClient struct {
- // The internal transport-dependent client.
- internalClient internalDatabaseAdminClient
-
- // The call options for this service.
- CallOptions *DatabaseAdminCallOptions
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient *lroauto.OperationsClient
-}
-
-// Wrapper methods routed to the internal client.
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *DatabaseAdminClient) Close() error {
- return c.internalClient.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *DatabaseAdminClient) setGoogleClientInfo(keyval ...string) {
- c.internalClient.setGoogleClientInfo(keyval...)
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *DatabaseAdminClient) Connection() *grpc.ClientConn {
- return c.internalClient.Connection()
-}
-
-// ListDatabases lists Cloud Spanner databases.
-func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator {
- return c.internalClient.ListDatabases(ctx, req, opts...)
-}
-
-// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving.
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format <database_name>/operations/<operation_id> and
-// can be used to track preparation of the database. The
-// metadata field type is
-// CreateDatabaseMetadata.
-// The response field type is
-// Database, if successful.
-func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
- return c.internalClient.CreateDatabase(ctx, req, opts...)
-}
-
-// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name.
-// The name must be that of a previously created CreateDatabaseOperation, possibly from a different process.
-func (c *DatabaseAdminClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation {
- return c.internalClient.CreateDatabaseOperation(name)
-}
-
-// GetDatabase gets the state of a Cloud Spanner database.
-func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
- return c.internalClient.GetDatabase(ctx, req, opts...)
-}
-
-// UpdateDatabase updates a Cloud Spanner database. The returned
-// [long-running operation][google.longrunning.Operation] can be used to track
-// the progress of updating the database. If the named database does not
-// exist, returns NOT_FOUND.
-//
-// While the operation is pending:
-//
-// The database’s
-// reconciling
-// field is set to true.
-//
-// Cancelling the operation is best-effort. If the cancellation succeeds,
-// the operation metadata’s
-// cancel_time
-// is set, the updates are reverted, and the operation terminates with a
-// CANCELLED status.
-//
-// New UpdateDatabase requests will return a FAILED_PRECONDITION error
-// until the pending operation is done (returns successfully or with
-// error).
-//
-// Reading the database via the API continues to give the pre-request
-// values.
-//
-// Upon completion of the returned operation:
-//
-// The new values are in effect and readable via the API.
-//
-// The database’s
-// reconciling
-// field becomes false.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>
-// and can be used to track the database modification. The
-// metadata field type is
-// UpdateDatabaseMetadata.
-// The response field type is
-// Database, if successful.
-func (c *DatabaseAdminClient) UpdateDatabase(ctx context.Context, req *databasepb.UpdateDatabaseRequest, opts ...gax.CallOption) (*UpdateDatabaseOperation, error) {
- return c.internalClient.UpdateDatabase(ctx, req, opts...)
-}
-
-// UpdateDatabaseOperation returns a new UpdateDatabaseOperation from a given name.
-// The name must be that of a previously created UpdateDatabaseOperation, possibly from a different process.
-func (c *DatabaseAdminClient) UpdateDatabaseOperation(name string) *UpdateDatabaseOperation {
- return c.internalClient.UpdateDatabaseOperation(name)
-}
-
-// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by
-// creating/altering/dropping tables, columns, indexes, etc. The returned
-// [long-running operation][google.longrunning.Operation] will have a name of
-// the format <database_name>/operations/<operation_id> and can be used to
-// track execution of the schema change(s). The
-// metadata field type is
-// UpdateDatabaseDdlMetadata.
-// The operation has no response.
-func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
- return c.internalClient.UpdateDatabaseDdl(ctx, req, opts...)
-}
-
-// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name.
-// The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process.
-func (c *DatabaseAdminClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation {
- return c.internalClient.UpdateDatabaseDdlOperation(name)
-}
-
-// DropDatabase drops (aka deletes) a Cloud Spanner database.
-// Completed backups for the database will be retained according to their
-// expire_time.
-// Note: Cloud Spanner might continue to accept requests for a few seconds
-// after the database has been deleted.
-func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
- return c.internalClient.DropDatabase(ctx, req, opts...)
-}
-
-// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted
-// DDL statements. This method does not show pending schema updates, those may
-// be queried using the Operations API.
-func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
- return c.internalClient.GetDatabaseDdl(ctx, req, opts...)
-}
-
-// SetIamPolicy sets the access control policy on a database or backup resource.
-// Replaces any existing policy.
-//
-// Authorization requires spanner.databases.setIamPolicy
-// permission on resource.
-// For backups, authorization requires spanner.backups.setIamPolicy
-// permission on resource.
-func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.SetIamPolicy(ctx, req, opts...)
-}
-
-// GetIamPolicy gets the access control policy for a database or backup resource.
-// Returns an empty policy if a database or backup exists but does not have a
-// policy set.
-//
-// Authorization requires spanner.databases.getIamPolicy permission on
-// resource.
-// For backups, authorization requires spanner.backups.getIamPolicy
-// permission on resource.
-func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.GetIamPolicy(ctx, req, opts...)
-}
-
-// TestIamPermissions returns permissions that the caller has on the specified database or backup
-// resource.
-//
-// Attempting this RPC on a non-existent Cloud Spanner database will
-// result in a NOT_FOUND error if the user has
-// spanner.databases.list permission on the containing Cloud
-// Spanner instance. Otherwise returns an empty set of permissions.
-// Calling this method on a backup that does not exist will
-// result in a NOT_FOUND error if the user has
-// spanner.backups.list permission on the containing instance.
-func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- return c.internalClient.TestIamPermissions(ctx, req, opts...)
-}
-
-// CreateBackup starts creating a new Cloud Spanner Backup.
-// The returned backup [long-running operation][google.longrunning.Operation]
-// will have a name of the format
-// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
-// and can be used to track creation of the backup. The
-// metadata field type is
-// CreateBackupMetadata.
-// The response field type is
-// Backup, if successful.
-// Cancelling the returned operation will stop the creation and delete the
-// backup. There can be only one pending backup creation per database. Backup
-// creation of different databases can run concurrently.
-func (c *DatabaseAdminClient) CreateBackup(ctx context.Context, req *databasepb.CreateBackupRequest, opts ...gax.CallOption) (*CreateBackupOperation, error) {
- return c.internalClient.CreateBackup(ctx, req, opts...)
-}
-
-// CreateBackupOperation returns a new CreateBackupOperation from a given name.
-// The name must be that of a previously created CreateBackupOperation, possibly from a different process.
-func (c *DatabaseAdminClient) CreateBackupOperation(name string) *CreateBackupOperation {
- return c.internalClient.CreateBackupOperation(name)
-}
-
-// CopyBackup starts copying a Cloud Spanner Backup.
-// The returned backup [long-running operation][google.longrunning.Operation]
-// will have a name of the format
-// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
-// and can be used to track copying of the backup. The operation is associated
-// with the destination backup.
-// The metadata field type is
-// CopyBackupMetadata.
-// The response field type is
-// Backup, if successful.
-// Cancelling the returned operation will stop the copying and delete the
-// destination backup. Concurrent CopyBackup requests can run on the same
-// source backup.
-func (c *DatabaseAdminClient) CopyBackup(ctx context.Context, req *databasepb.CopyBackupRequest, opts ...gax.CallOption) (*CopyBackupOperation, error) {
- return c.internalClient.CopyBackup(ctx, req, opts...)
-}
-
-// CopyBackupOperation returns a new CopyBackupOperation from a given name.
-// The name must be that of a previously created CopyBackupOperation, possibly from a different process.
-func (c *DatabaseAdminClient) CopyBackupOperation(name string) *CopyBackupOperation {
- return c.internalClient.CopyBackupOperation(name)
-}
-
-// GetBackup gets metadata on a pending or completed
-// Backup.
-func (c *DatabaseAdminClient) GetBackup(ctx context.Context, req *databasepb.GetBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
- return c.internalClient.GetBackup(ctx, req, opts...)
-}
-
-// UpdateBackup updates a pending or completed
-// Backup.
-func (c *DatabaseAdminClient) UpdateBackup(ctx context.Context, req *databasepb.UpdateBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
- return c.internalClient.UpdateBackup(ctx, req, opts...)
-}
-
-// DeleteBackup deletes a pending or completed
-// Backup.
-func (c *DatabaseAdminClient) DeleteBackup(ctx context.Context, req *databasepb.DeleteBackupRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteBackup(ctx, req, opts...)
-}
-
-// ListBackups lists completed and pending backups.
-// Backups returned are ordered by create_time in descending order,
-// starting from the most recent create_time.
-func (c *DatabaseAdminClient) ListBackups(ctx context.Context, req *databasepb.ListBackupsRequest, opts ...gax.CallOption) *BackupIterator {
- return c.internalClient.ListBackups(ctx, req, opts...)
-}
-
-// RestoreDatabase create a new database by restoring from a completed backup. The new
-// database must be in the same project and in an instance with the same
-// instance configuration as the instance containing
-// the backup. The returned database [long-running
-// operation][google.longrunning.Operation] has a name of the format
-// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>,
-// and can be used to track the progress of the operation, and to cancel it.
-// The metadata field type is
-// RestoreDatabaseMetadata.
-// The response type
-// is Database, if
-// successful. Cancelling the returned operation will stop the restore and
-// delete the database.
-// There can be only one database being restored into an instance at a time.
-// Once the restore operation completes, a new restore operation can be
-// initiated, without waiting for the optimize operation associated with the
-// first restore to complete.
-func (c *DatabaseAdminClient) RestoreDatabase(ctx context.Context, req *databasepb.RestoreDatabaseRequest, opts ...gax.CallOption) (*RestoreDatabaseOperation, error) {
- return c.internalClient.RestoreDatabase(ctx, req, opts...)
-}
-
-// RestoreDatabaseOperation returns a new RestoreDatabaseOperation from a given name.
-// The name must be that of a previously created RestoreDatabaseOperation, possibly from a different process.
-func (c *DatabaseAdminClient) RestoreDatabaseOperation(name string) *RestoreDatabaseOperation {
- return c.internalClient.RestoreDatabaseOperation(name)
-}
-
-// ListDatabaseOperations lists database [longrunning-operations][google.longrunning.Operation].
-// A database operation has a name of the form
-// projects/<project>/instances/<instance>/databases/<database>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations.
-func (c *DatabaseAdminClient) ListDatabaseOperations(ctx context.Context, req *databasepb.ListDatabaseOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- return c.internalClient.ListDatabaseOperations(ctx, req, opts...)
-}
-
-// ListBackupOperations lists the backup [long-running operations][google.longrunning.Operation] in
-// the given instance. A backup operation has a name of the form
-// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations. Operations returned are ordered by
-// operation.metadata.value.progress.start_time in descending order starting
-// from the most recently started operation.
-func (c *DatabaseAdminClient) ListBackupOperations(ctx context.Context, req *databasepb.ListBackupOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- return c.internalClient.ListBackupOperations(ctx, req, opts...)
-}
-
-// ListDatabaseRoles lists Cloud Spanner database roles.
-func (c *DatabaseAdminClient) ListDatabaseRoles(ctx context.Context, req *databasepb.ListDatabaseRolesRequest, opts ...gax.CallOption) *DatabaseRoleIterator {
- return c.internalClient.ListDatabaseRoles(ctx, req, opts...)
-}
-
-// CreateBackupSchedule creates a new backup schedule.
-func (c *DatabaseAdminClient) CreateBackupSchedule(ctx context.Context, req *databasepb.CreateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- return c.internalClient.CreateBackupSchedule(ctx, req, opts...)
-}
-
-// GetBackupSchedule gets backup schedule for the input schedule name.
-func (c *DatabaseAdminClient) GetBackupSchedule(ctx context.Context, req *databasepb.GetBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- return c.internalClient.GetBackupSchedule(ctx, req, opts...)
-}
-
-// UpdateBackupSchedule updates a backup schedule.
-func (c *DatabaseAdminClient) UpdateBackupSchedule(ctx context.Context, req *databasepb.UpdateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- return c.internalClient.UpdateBackupSchedule(ctx, req, opts...)
-}
-
-// DeleteBackupSchedule deletes a backup schedule.
-func (c *DatabaseAdminClient) DeleteBackupSchedule(ctx context.Context, req *databasepb.DeleteBackupScheduleRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteBackupSchedule(ctx, req, opts...)
-}
-
-// ListBackupSchedules lists all the backup schedules for the database.
-func (c *DatabaseAdminClient) ListBackupSchedules(ctx context.Context, req *databasepb.ListBackupSchedulesRequest, opts ...gax.CallOption) *BackupScheduleIterator {
- return c.internalClient.ListBackupSchedules(ctx, req, opts...)
-}
-
-// CancelOperation is a utility method from google.longrunning.Operations.
-func (c *DatabaseAdminClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
- return c.internalClient.CancelOperation(ctx, req, opts...)
-}
-
-// DeleteOperation is a utility method from google.longrunning.Operations.
-func (c *DatabaseAdminClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteOperation(ctx, req, opts...)
-}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *DatabaseAdminClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- return c.internalClient.GetOperation(ctx, req, opts...)
-}
-
-// ListOperations is a utility method from google.longrunning.Operations.
-func (c *DatabaseAdminClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- return c.internalClient.ListOperations(ctx, req, opts...)
-}
-
-// databaseAdminGRPCClient is a client for interacting with Cloud Spanner API over gRPC transport.
-//
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type databaseAdminGRPCClient struct {
- // Connection pool of gRPC connections to the service.
- connPool gtransport.ConnPool
-
- // Points back to the CallOptions field of the containing DatabaseAdminClient
- CallOptions **DatabaseAdminCallOptions
-
- // The gRPC API client.
- databaseAdminClient databasepb.DatabaseAdminClient
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient **lroauto.OperationsClient
-
- operationsClient longrunningpb.OperationsClient
-
- // The x-goog-* metadata to be sent with each request.
- xGoogHeaders []string
-}
-
-// NewDatabaseAdminClient creates a new database admin client based on gRPC.
-// The returned client must be Closed when it is done being used to clean up its underlying connections.
-//
-// # Cloud Spanner Database Admin API
-//
-// The Cloud Spanner Database Admin API can be used to:
-//
-// create, drop, and list databases
-//
-// update the schema of pre-existing databases
-//
-// create, delete, copy and list backups for a database
-//
-// restore a database from an existing backup
-func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) {
- clientOpts := defaultDatabaseAdminGRPCClientOptions()
- if newDatabaseAdminClientHook != nil {
- hookOpts, err := newDatabaseAdminClientHook(ctx, clientHookParams{})
- if err != nil {
- return nil, err
- }
- clientOpts = append(clientOpts, hookOpts...)
- }
-
- connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
- if err != nil {
- return nil, err
- }
- client := DatabaseAdminClient{CallOptions: defaultDatabaseAdminCallOptions()}
-
- c := &databaseAdminGRPCClient{
- connPool: connPool,
- databaseAdminClient: databasepb.NewDatabaseAdminClient(connPool),
- CallOptions: &client.CallOptions,
- operationsClient: longrunningpb.NewOperationsClient(connPool),
- }
- c.setGoogleClientInfo()
-
- client.internalClient = c
-
- client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
- if err != nil {
- // This error "should not happen", since we are just reusing old connection pool
- // and never actually need to dial.
- // If this does happen, we could leak connp. However, we cannot close conn:
- // If the user invoked the constructor with option.WithGRPCConn,
- // we would close a connection that's still in use.
- // TODO: investigate error conditions.
- return nil, err
- }
- c.LROClient = &client.LROClient
- return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *databaseAdminGRPCClient) Connection() *grpc.ClientConn {
- return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *databaseAdminGRPCClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *databaseAdminGRPCClient) Close() error {
- return c.connPool.Close()
-}
-
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type databaseAdminRESTClient struct {
- // The http endpoint to connect to.
- endpoint string
-
- // The http client.
- httpClient *http.Client
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient **lroauto.OperationsClient
-
- // The x-goog-* headers to be sent with each request.
- xGoogHeaders []string
-
- // Points back to the CallOptions field of the containing DatabaseAdminClient
- CallOptions **DatabaseAdminCallOptions
-}
-
-// NewDatabaseAdminRESTClient creates a new database admin rest client.
-//
-// # Cloud Spanner Database Admin API
-//
-// The Cloud Spanner Database Admin API can be used to:
-//
-// create, drop, and list databases
-//
-// update the schema of pre-existing databases
-//
-// create, delete, copy and list backups for a database
-//
-// restore a database from an existing backup
-func NewDatabaseAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) {
- clientOpts := append(defaultDatabaseAdminRESTClientOptions(), opts...)
- httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
- if err != nil {
- return nil, err
- }
-
- callOpts := defaultDatabaseAdminRESTCallOptions()
- c := &databaseAdminRESTClient{
- endpoint: endpoint,
- httpClient: httpClient,
- CallOptions: &callOpts,
- }
- c.setGoogleClientInfo()
-
- lroOpts := []option.ClientOption{
- option.WithHTTPClient(httpClient),
- option.WithEndpoint(endpoint),
- }
- opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...)
- if err != nil {
- return nil, err
- }
- c.LROClient = &opClient
-
- return &DatabaseAdminClient{internalClient: c, CallOptions: callOpts}, nil
-}
-
-func defaultDatabaseAdminRESTClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("https://spanner.googleapis.com"),
- internaloption.WithDefaultEndpointTemplate("https://spanner.UNIVERSE_DOMAIN"),
- internaloption.WithDefaultMTLSEndpoint("https://spanner.mtls.googleapis.com"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- }
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *databaseAdminRESTClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *databaseAdminRESTClient) Close() error {
- // Replace httpClient with nil to force cleanup.
- c.httpClient = nil
- return nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: This method always returns nil.
-func (c *databaseAdminRESTClient) Connection() *grpc.ClientConn {
- return nil
-}
-func (c *databaseAdminGRPCClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListDatabases[0:len((*c.CallOptions).ListDatabases):len((*c.CallOptions).ListDatabases)], opts...)
- it := &DatabaseIterator{}
- req = proto.Clone(req).(*databasepb.ListDatabasesRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) {
- resp := &databasepb.ListDatabasesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.ListDatabases(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetDatabases(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *databaseAdminGRPCClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateDatabase[0:len((*c.CallOptions).CreateDatabase):len((*c.CallOptions).CreateDatabase)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.CreateDatabase(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CreateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *databaseAdminGRPCClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetDatabase[0:len((*c.CallOptions).GetDatabase):len((*c.CallOptions).GetDatabase)], opts...)
- var resp *databasepb.Database
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.GetDatabase(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) UpdateDatabase(ctx context.Context, req *databasepb.UpdateDatabaseRequest, opts ...gax.CallOption) (*UpdateDatabaseOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database.name", url.QueryEscape(req.GetDatabase().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateDatabase[0:len((*c.CallOptions).UpdateDatabase):len((*c.CallOptions).UpdateDatabase)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.UpdateDatabase(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &UpdateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *databaseAdminGRPCClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateDatabaseDdl[0:len((*c.CallOptions).UpdateDatabaseDdl):len((*c.CallOptions).UpdateDatabaseDdl)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &UpdateDatabaseDdlOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *databaseAdminGRPCClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DropDatabase[0:len((*c.CallOptions).DropDatabase):len((*c.CallOptions).DropDatabase)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.databaseAdminClient.DropDatabase(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *databaseAdminGRPCClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetDatabaseDdl[0:len((*c.CallOptions).GetDatabaseDdl):len((*c.CallOptions).GetDatabaseDdl)], opts...)
- var resp *databasepb.GetDatabaseDdlResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- var resp *iampb.TestIamPermissionsResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) CreateBackup(ctx context.Context, req *databasepb.CreateBackupRequest, opts ...gax.CallOption) (*CreateBackupOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateBackup[0:len((*c.CallOptions).CreateBackup):len((*c.CallOptions).CreateBackup)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.CreateBackup(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CreateBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *databaseAdminGRPCClient) CopyBackup(ctx context.Context, req *databasepb.CopyBackupRequest, opts ...gax.CallOption) (*CopyBackupOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CopyBackup[0:len((*c.CallOptions).CopyBackup):len((*c.CallOptions).CopyBackup)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.CopyBackup(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CopyBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *databaseAdminGRPCClient) GetBackup(ctx context.Context, req *databasepb.GetBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetBackup[0:len((*c.CallOptions).GetBackup):len((*c.CallOptions).GetBackup)], opts...)
- var resp *databasepb.Backup
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.GetBackup(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) UpdateBackup(ctx context.Context, req *databasepb.UpdateBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup.name", url.QueryEscape(req.GetBackup().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateBackup[0:len((*c.CallOptions).UpdateBackup):len((*c.CallOptions).UpdateBackup)], opts...)
- var resp *databasepb.Backup
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.UpdateBackup(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) DeleteBackup(ctx context.Context, req *databasepb.DeleteBackupRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteBackup[0:len((*c.CallOptions).DeleteBackup):len((*c.CallOptions).DeleteBackup)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.databaseAdminClient.DeleteBackup(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *databaseAdminGRPCClient) ListBackups(ctx context.Context, req *databasepb.ListBackupsRequest, opts ...gax.CallOption) *BackupIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListBackups[0:len((*c.CallOptions).ListBackups):len((*c.CallOptions).ListBackups)], opts...)
- it := &BackupIterator{}
- req = proto.Clone(req).(*databasepb.ListBackupsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Backup, string, error) {
- resp := &databasepb.ListBackupsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.ListBackups(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetBackups(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *databaseAdminGRPCClient) RestoreDatabase(ctx context.Context, req *databasepb.RestoreDatabaseRequest, opts ...gax.CallOption) (*RestoreDatabaseOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).RestoreDatabase[0:len((*c.CallOptions).RestoreDatabase):len((*c.CallOptions).RestoreDatabase)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.RestoreDatabase(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &RestoreDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *databaseAdminGRPCClient) ListDatabaseOperations(ctx context.Context, req *databasepb.ListDatabaseOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListDatabaseOperations[0:len((*c.CallOptions).ListDatabaseOperations):len((*c.CallOptions).ListDatabaseOperations)], opts...)
- it := &OperationIterator{}
- req = proto.Clone(req).(*databasepb.ListDatabaseOperationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &databasepb.ListDatabaseOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.ListDatabaseOperations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *databaseAdminGRPCClient) ListBackupOperations(ctx context.Context, req *databasepb.ListBackupOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListBackupOperations[0:len((*c.CallOptions).ListBackupOperations):len((*c.CallOptions).ListBackupOperations)], opts...)
- it := &OperationIterator{}
- req = proto.Clone(req).(*databasepb.ListBackupOperationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &databasepb.ListBackupOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.ListBackupOperations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *databaseAdminGRPCClient) ListDatabaseRoles(ctx context.Context, req *databasepb.ListDatabaseRolesRequest, opts ...gax.CallOption) *DatabaseRoleIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListDatabaseRoles[0:len((*c.CallOptions).ListDatabaseRoles):len((*c.CallOptions).ListDatabaseRoles)], opts...)
- it := &DatabaseRoleIterator{}
- req = proto.Clone(req).(*databasepb.ListDatabaseRolesRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.DatabaseRole, string, error) {
- resp := &databasepb.ListDatabaseRolesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.ListDatabaseRoles(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetDatabaseRoles(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *databaseAdminGRPCClient) CreateBackupSchedule(ctx context.Context, req *databasepb.CreateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateBackupSchedule[0:len((*c.CallOptions).CreateBackupSchedule):len((*c.CallOptions).CreateBackupSchedule)], opts...)
- var resp *databasepb.BackupSchedule
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.CreateBackupSchedule(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) GetBackupSchedule(ctx context.Context, req *databasepb.GetBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetBackupSchedule[0:len((*c.CallOptions).GetBackupSchedule):len((*c.CallOptions).GetBackupSchedule)], opts...)
- var resp *databasepb.BackupSchedule
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.GetBackupSchedule(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) UpdateBackupSchedule(ctx context.Context, req *databasepb.UpdateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup_schedule.name", url.QueryEscape(req.GetBackupSchedule().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateBackupSchedule[0:len((*c.CallOptions).UpdateBackupSchedule):len((*c.CallOptions).UpdateBackupSchedule)], opts...)
- var resp *databasepb.BackupSchedule
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.UpdateBackupSchedule(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) DeleteBackupSchedule(ctx context.Context, req *databasepb.DeleteBackupScheduleRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteBackupSchedule[0:len((*c.CallOptions).DeleteBackupSchedule):len((*c.CallOptions).DeleteBackupSchedule)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.databaseAdminClient.DeleteBackupSchedule(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *databaseAdminGRPCClient) ListBackupSchedules(ctx context.Context, req *databasepb.ListBackupSchedulesRequest, opts ...gax.CallOption) *BackupScheduleIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListBackupSchedules[0:len((*c.CallOptions).ListBackupSchedules):len((*c.CallOptions).ListBackupSchedules)], opts...)
- it := &BackupScheduleIterator{}
- req = proto.Clone(req).(*databasepb.ListBackupSchedulesRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.BackupSchedule, string, error) {
- resp := &databasepb.ListBackupSchedulesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.databaseAdminClient.ListBackupSchedules(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetBackupSchedules(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *databaseAdminGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *databaseAdminGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *databaseAdminGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *databaseAdminGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...)
- it := &OperationIterator{}
- req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &longrunningpb.ListOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// ListDatabases lists Cloud Spanner databases.
-func (c *databaseAdminRESTClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest, opts ...gax.CallOption) *DatabaseIterator {
- it := &DatabaseIterator{}
- req = proto.Clone(req).(*databasepb.ListDatabasesRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) {
- resp := &databasepb.ListDatabasesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/databases", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetDatabases(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving.
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format <database_name>/operations/<operation_id> and
-// can be used to track preparation of the database. The
-// metadata field type is
-// CreateDatabaseMetadata.
-// The response field type is
-// Database, if successful.
-func (c *databaseAdminRESTClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest, opts ...gax.CallOption) (*CreateDatabaseOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/databases", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CreateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// GetDatabase gets the state of a Cloud Spanner database.
-func (c *databaseAdminRESTClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest, opts ...gax.CallOption) (*databasepb.Database, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetDatabase[0:len((*c.CallOptions).GetDatabase):len((*c.CallOptions).GetDatabase)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.Database{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// UpdateDatabase updates a Cloud Spanner database. The returned
-// [long-running operation][google.longrunning.Operation] can be used to track
-// the progress of updating the database. If the named database does not
-// exist, returns NOT_FOUND.
-//
-// While the operation is pending:
-//
-// The database’s
-// reconciling
-// field is set to true.
-//
-// Cancelling the operation is best-effort. If the cancellation succeeds,
-// the operation metadata’s
-// cancel_time
-// is set, the updates are reverted, and the operation terminates with a
-// CANCELLED status.
-//
-// New UpdateDatabase requests will return a FAILED_PRECONDITION error
-// until the pending operation is done (returns successfully or with
-// error).
-//
-// Reading the database via the API continues to give the pre-request
-// values.
-//
-// Upon completion of the returned operation:
-//
-// The new values are in effect and readable via the API.
-//
-// The database’s
-// reconciling
-// field becomes false.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>
-// and can be used to track the database modification. The
-// metadata field type is
-// UpdateDatabaseMetadata.
-// The response field type is
-// Database, if successful.
-func (c *databaseAdminRESTClient) UpdateDatabase(ctx context.Context, req *databasepb.UpdateDatabaseRequest, opts ...gax.CallOption) (*UpdateDatabaseOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetDatabase()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetDatabase().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
- if err != nil {
- return nil, err
- }
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database.name", url.QueryEscape(req.GetDatabase().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &UpdateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by
-// creating/altering/dropping tables, columns, indexes, etc. The returned
-// [long-running operation][google.longrunning.Operation] will have a name of
-// the format <database_name>/operations/<operation_id> and can be used to
-// track execution of the schema change(s). The
-// metadata field type is
-// UpdateDatabaseDdlMetadata.
-// The operation has no response.
-func (c *databaseAdminRESTClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest, opts ...gax.CallOption) (*UpdateDatabaseDdlOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/ddl", req.GetDatabase())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &UpdateDatabaseDdlOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// DropDatabase drops (aka deletes) a Cloud Spanner database.
-// Completed backups for the database will be retained according to their
-// expire_time.
-// Note: Cloud Spanner might continue to accept requests for a few seconds
-// after the database has been deleted.
-func (c *databaseAdminRESTClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetDatabase())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted
-// DDL statements. This method does not show pending schema updates, those may
-// be queried using the Operations API.
-func (c *databaseAdminRESTClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest, opts ...gax.CallOption) (*databasepb.GetDatabaseDdlResponse, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/ddl", req.GetDatabase())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetDatabaseDdl[0:len((*c.CallOptions).GetDatabaseDdl):len((*c.CallOptions).GetDatabaseDdl)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.GetDatabaseDdlResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// SetIamPolicy sets the access control policy on a database or backup resource.
-// Replaces any existing policy.
-//
-// Authorization requires spanner.databases.setIamPolicy
-// permission on resource.
-// For backups, authorization requires spanner.backups.setIamPolicy
-// permission on resource.
-func (c *databaseAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetIamPolicy gets the access control policy for a database or backup resource.
-// Returns an empty policy if a database or backup exists but does not have a
-// policy set.
-//
-// Authorization requires spanner.databases.getIamPolicy permission on
-// resource.
-// For backups, authorization requires spanner.backups.getIamPolicy
-// permission on resource.
-func (c *databaseAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// TestIamPermissions returns permissions that the caller has on the specified database or backup
-// resource.
-//
-// Attempting this RPC on a non-existent Cloud Spanner database will
-// result in a NOT_FOUND error if the user has
-// spanner.databases.list permission on the containing Cloud
-// Spanner instance. Otherwise returns an empty set of permissions.
-// Calling this method on a backup that does not exist will
-// result in a NOT_FOUND error if the user has
-// spanner.backups.list permission on the containing instance.
-func (c *databaseAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.TestIamPermissionsResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// CreateBackup starts creating a new Cloud Spanner Backup.
-// The returned backup [long-running operation][google.longrunning.Operation]
-// will have a name of the format
-// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
-// and can be used to track creation of the backup. The
-// metadata field type is
-// CreateBackupMetadata.
-// The response field type is
-// Backup, if successful.
-// Cancelling the returned operation will stop the creation and delete the
-// backup. There can be only one pending backup creation per database. Backup
-// creation of different databases can run concurrently.
-func (c *databaseAdminRESTClient) CreateBackup(ctx context.Context, req *databasepb.CreateBackupRequest, opts ...gax.CallOption) (*CreateBackupOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetBackup()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/backups", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- params.Add("backupId", fmt.Sprintf("%v", req.GetBackupId()))
- params.Add("encryptionConfig.encryptionType", fmt.Sprintf("%v", req.GetEncryptionConfig().GetEncryptionType()))
- if req.GetEncryptionConfig().GetKmsKeyName() != "" {
- params.Add("encryptionConfig.kmsKeyName", fmt.Sprintf("%v", req.GetEncryptionConfig().GetKmsKeyName()))
- }
- if items := req.GetEncryptionConfig().GetKmsKeyNames(); len(items) > 0 {
- for _, item := range items {
- params.Add("encryptionConfig.kmsKeyNames", fmt.Sprintf("%v", item))
- }
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CreateBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// CopyBackup starts copying a Cloud Spanner Backup.
-// The returned backup [long-running operation][google.longrunning.Operation]
-// will have a name of the format
-// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>
-// and can be used to track copying of the backup. The operation is associated
-// with the destination backup.
-// The metadata field type is
-// CopyBackupMetadata.
-// The response field type is
-// Backup, if successful.
-// Cancelling the returned operation will stop the copying and delete the
-// destination backup. Concurrent CopyBackup requests can run on the same
-// source backup.
-func (c *databaseAdminRESTClient) CopyBackup(ctx context.Context, req *databasepb.CopyBackupRequest, opts ...gax.CallOption) (*CopyBackupOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/backups:copy", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CopyBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// GetBackup gets metadata on a pending or completed
-// Backup.
-func (c *databaseAdminRESTClient) GetBackup(ctx context.Context, req *databasepb.GetBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetBackup[0:len((*c.CallOptions).GetBackup):len((*c.CallOptions).GetBackup)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.Backup{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// UpdateBackup updates a pending or completed
-// Backup.
-func (c *databaseAdminRESTClient) UpdateBackup(ctx context.Context, req *databasepb.UpdateBackupRequest, opts ...gax.CallOption) (*databasepb.Backup, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetBackup()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetBackup().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
- if err != nil {
- return nil, err
- }
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup.name", url.QueryEscape(req.GetBackup().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).UpdateBackup[0:len((*c.CallOptions).UpdateBackup):len((*c.CallOptions).UpdateBackup)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.Backup{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// DeleteBackup deletes a pending or completed
-// Backup.
-func (c *databaseAdminRESTClient) DeleteBackup(ctx context.Context, req *databasepb.DeleteBackupRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// ListBackups lists completed and pending backups.
-// Backups returned are ordered by create_time in descending order,
-// starting from the most recent create_time.
-func (c *databaseAdminRESTClient) ListBackups(ctx context.Context, req *databasepb.ListBackupsRequest, opts ...gax.CallOption) *BackupIterator {
- it := &BackupIterator{}
- req = proto.Clone(req).(*databasepb.ListBackupsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Backup, string, error) {
- resp := &databasepb.ListBackupsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/backups", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetBackups(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// RestoreDatabase create a new database by restoring from a completed backup. The new
-// database must be in the same project and in an instance with the same
-// instance configuration as the instance containing
-// the backup. The returned database [long-running
-// operation][google.longrunning.Operation] has a name of the format
-// projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>,
-// and can be used to track the progress of the operation, and to cancel it.
-// The metadata field type is
-// RestoreDatabaseMetadata.
-// The response type
-// is Database, if
-// successful. Cancelling the returned operation will stop the restore and
-// delete the database.
-// There can be only one database being restored into an instance at a time.
-// Once the restore operation completes, a new restore operation can be
-// initiated, without waiting for the optimize operation associated with the
-// first restore to complete.
-func (c *databaseAdminRESTClient) RestoreDatabase(ctx context.Context, req *databasepb.RestoreDatabaseRequest, opts ...gax.CallOption) (*RestoreDatabaseOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/databases:restore", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &RestoreDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// ListDatabaseOperations lists database [longrunning-operations][google.longrunning.Operation].
-// A database operation has a name of the form
-// projects/<project>/instances/<instance>/databases/<database>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations.
-func (c *databaseAdminRESTClient) ListDatabaseOperations(ctx context.Context, req *databasepb.ListDatabaseOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- it := &OperationIterator{}
- req = proto.Clone(req).(*databasepb.ListDatabaseOperationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &databasepb.ListDatabaseOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/databaseOperations", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// ListBackupOperations lists the backup [long-running operations][google.longrunning.Operation] in
-// the given instance. A backup operation has a name of the form
-// projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations. Operations returned are ordered by
-// operation.metadata.value.progress.start_time in descending order starting
-// from the most recently started operation.
-func (c *databaseAdminRESTClient) ListBackupOperations(ctx context.Context, req *databasepb.ListBackupOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- it := &OperationIterator{}
- req = proto.Clone(req).(*databasepb.ListBackupOperationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &databasepb.ListBackupOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/backupOperations", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// ListDatabaseRoles lists Cloud Spanner database roles.
-func (c *databaseAdminRESTClient) ListDatabaseRoles(ctx context.Context, req *databasepb.ListDatabaseRolesRequest, opts ...gax.CallOption) *DatabaseRoleIterator {
- it := &DatabaseRoleIterator{}
- req = proto.Clone(req).(*databasepb.ListDatabaseRolesRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.DatabaseRole, string, error) {
- resp := &databasepb.ListDatabaseRolesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/databaseRoles", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetDatabaseRoles(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// CreateBackupSchedule creates a new backup schedule.
-func (c *databaseAdminRESTClient) CreateBackupSchedule(ctx context.Context, req *databasepb.CreateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetBackupSchedule()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/backupSchedules", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- params.Add("backupScheduleId", fmt.Sprintf("%v", req.GetBackupScheduleId()))
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).CreateBackupSchedule[0:len((*c.CallOptions).CreateBackupSchedule):len((*c.CallOptions).CreateBackupSchedule)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.BackupSchedule{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetBackupSchedule gets backup schedule for the input schedule name.
-func (c *databaseAdminRESTClient) GetBackupSchedule(ctx context.Context, req *databasepb.GetBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetBackupSchedule[0:len((*c.CallOptions).GetBackupSchedule):len((*c.CallOptions).GetBackupSchedule)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.BackupSchedule{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// UpdateBackupSchedule updates a backup schedule.
-func (c *databaseAdminRESTClient) UpdateBackupSchedule(ctx context.Context, req *databasepb.UpdateBackupScheduleRequest, opts ...gax.CallOption) (*databasepb.BackupSchedule, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- body := req.GetBackupSchedule()
- jsonReq, err := m.Marshal(body)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetBackupSchedule().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetUpdateMask() != nil {
- updateMask, err := protojson.Marshal(req.GetUpdateMask())
- if err != nil {
- return nil, err
- }
- params.Add("updateMask", string(updateMask[1:len(updateMask)-1]))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "backup_schedule.name", url.QueryEscape(req.GetBackupSchedule().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).UpdateBackupSchedule[0:len((*c.CallOptions).UpdateBackupSchedule):len((*c.CallOptions).UpdateBackupSchedule)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &databasepb.BackupSchedule{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// DeleteBackupSchedule deletes a backup schedule.
-func (c *databaseAdminRESTClient) DeleteBackupSchedule(ctx context.Context, req *databasepb.DeleteBackupScheduleRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// ListBackupSchedules lists all the backup schedules for the database.
-func (c *databaseAdminRESTClient) ListBackupSchedules(ctx context.Context, req *databasepb.ListBackupSchedulesRequest, opts ...gax.CallOption) *BackupScheduleIterator {
- it := &BackupScheduleIterator{}
- req = proto.Clone(req).(*databasepb.ListBackupSchedulesRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.BackupSchedule, string, error) {
- resp := &databasepb.ListBackupSchedulesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/backupSchedules", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetBackupSchedules(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// CancelOperation is a utility method from google.longrunning.Operations.
-func (c *databaseAdminRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// DeleteOperation is a utility method from google.longrunning.Operations.
-func (c *databaseAdminRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// GetOperation is a utility method from google.longrunning.Operations.
-func (c *databaseAdminRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ListOperations is a utility method from google.longrunning.Operations.
-func (c *databaseAdminRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- it := &OperationIterator{}
- req = proto.Clone(req).(*longrunningpb.ListOperationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &longrunningpb.ListOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// CopyBackupOperation returns a new CopyBackupOperation from a given name.
-// The name must be that of a previously created CopyBackupOperation, possibly from a different process.
-func (c *databaseAdminGRPCClient) CopyBackupOperation(name string) *CopyBackupOperation {
- return &CopyBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CopyBackupOperation returns a new CopyBackupOperation from a given name.
-// The name must be that of a previously created CopyBackupOperation, possibly from a different process.
-func (c *databaseAdminRESTClient) CopyBackupOperation(name string) *CopyBackupOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CopyBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// CreateBackupOperation returns a new CreateBackupOperation from a given name.
-// The name must be that of a previously created CreateBackupOperation, possibly from a different process.
-func (c *databaseAdminGRPCClient) CreateBackupOperation(name string) *CreateBackupOperation {
- return &CreateBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CreateBackupOperation returns a new CreateBackupOperation from a given name.
-// The name must be that of a previously created CreateBackupOperation, possibly from a different process.
-func (c *databaseAdminRESTClient) CreateBackupOperation(name string) *CreateBackupOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CreateBackupOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name.
-// The name must be that of a previously created CreateDatabaseOperation, possibly from a different process.
-func (c *databaseAdminGRPCClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation {
- return &CreateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CreateDatabaseOperation returns a new CreateDatabaseOperation from a given name.
-// The name must be that of a previously created CreateDatabaseOperation, possibly from a different process.
-func (c *databaseAdminRESTClient) CreateDatabaseOperation(name string) *CreateDatabaseOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CreateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// RestoreDatabaseOperation returns a new RestoreDatabaseOperation from a given name.
-// The name must be that of a previously created RestoreDatabaseOperation, possibly from a different process.
-func (c *databaseAdminGRPCClient) RestoreDatabaseOperation(name string) *RestoreDatabaseOperation {
- return &RestoreDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// RestoreDatabaseOperation returns a new RestoreDatabaseOperation from a given name.
-// The name must be that of a previously created RestoreDatabaseOperation, possibly from a different process.
-func (c *databaseAdminRESTClient) RestoreDatabaseOperation(name string) *RestoreDatabaseOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &RestoreDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// UpdateDatabaseOperation returns a new UpdateDatabaseOperation from a given name.
-// The name must be that of a previously created UpdateDatabaseOperation, possibly from a different process.
-func (c *databaseAdminGRPCClient) UpdateDatabaseOperation(name string) *UpdateDatabaseOperation {
- return &UpdateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// UpdateDatabaseOperation returns a new UpdateDatabaseOperation from a given name.
-// The name must be that of a previously created UpdateDatabaseOperation, possibly from a different process.
-func (c *databaseAdminRESTClient) UpdateDatabaseOperation(name string) *UpdateDatabaseOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &UpdateDatabaseOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name.
-// The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process.
-func (c *databaseAdminGRPCClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation {
- return &UpdateDatabaseDdlOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// UpdateDatabaseDdlOperation returns a new UpdateDatabaseDdlOperation from a given name.
-// The name must be that of a previously created UpdateDatabaseDdlOperation, possibly from a different process.
-func (c *databaseAdminRESTClient) UpdateDatabaseDdlOperation(name string) *UpdateDatabaseDdlOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &UpdateDatabaseDdlOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go
deleted file mode 100644
index 7a5db895c..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup.pb.go
+++ /dev/null
@@ -1,2446 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/admin/database/v1/backup.proto
-
-package databasepb
-
-import (
- reflect "reflect"
- sync "sync"
-
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Indicates the current state of the backup.
-type Backup_State int32
-
-const (
- // Not specified.
- Backup_STATE_UNSPECIFIED Backup_State = 0
- // The pending backup is still being created. Operations on the
- // backup may fail with `FAILED_PRECONDITION` in this state.
- Backup_CREATING Backup_State = 1
- // The backup is complete and ready for use.
- Backup_READY Backup_State = 2
-)
-
-// Enum value maps for Backup_State.
-var (
- Backup_State_name = map[int32]string{
- 0: "STATE_UNSPECIFIED",
- 1: "CREATING",
- 2: "READY",
- }
- Backup_State_value = map[string]int32{
- "STATE_UNSPECIFIED": 0,
- "CREATING": 1,
- "READY": 2,
- }
-)
-
-func (x Backup_State) Enum() *Backup_State {
- p := new(Backup_State)
- *p = x
- return p
-}
-
-func (x Backup_State) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Backup_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_backup_proto_enumTypes[0].Descriptor()
-}
-
-func (Backup_State) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_backup_proto_enumTypes[0]
-}
-
-func (x Backup_State) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Backup_State.Descriptor instead.
-func (Backup_State) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Encryption types for the backup.
-type CreateBackupEncryptionConfig_EncryptionType int32
-
-const (
- // Unspecified. Do not use.
- CreateBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED CreateBackupEncryptionConfig_EncryptionType = 0
- // Use the same encryption configuration as the database. This is the
- // default option when
- // [encryption_config][google.spanner.admin.database.v1.CreateBackupEncryptionConfig]
- // is empty. For example, if the database is using
- // `Customer_Managed_Encryption`, the backup will be using the same Cloud
- // KMS key as the database.
- CreateBackupEncryptionConfig_USE_DATABASE_ENCRYPTION CreateBackupEncryptionConfig_EncryptionType = 1
- // Use Google default encryption.
- CreateBackupEncryptionConfig_GOOGLE_DEFAULT_ENCRYPTION CreateBackupEncryptionConfig_EncryptionType = 2
- // Use customer managed encryption. If specified, `kms_key_name`
- // must contain a valid Cloud KMS key.
- CreateBackupEncryptionConfig_CUSTOMER_MANAGED_ENCRYPTION CreateBackupEncryptionConfig_EncryptionType = 3
-)
-
-// Enum value maps for CreateBackupEncryptionConfig_EncryptionType.
-var (
- CreateBackupEncryptionConfig_EncryptionType_name = map[int32]string{
- 0: "ENCRYPTION_TYPE_UNSPECIFIED",
- 1: "USE_DATABASE_ENCRYPTION",
- 2: "GOOGLE_DEFAULT_ENCRYPTION",
- 3: "CUSTOMER_MANAGED_ENCRYPTION",
- }
- CreateBackupEncryptionConfig_EncryptionType_value = map[string]int32{
- "ENCRYPTION_TYPE_UNSPECIFIED": 0,
- "USE_DATABASE_ENCRYPTION": 1,
- "GOOGLE_DEFAULT_ENCRYPTION": 2,
- "CUSTOMER_MANAGED_ENCRYPTION": 3,
- }
-)
-
-func (x CreateBackupEncryptionConfig_EncryptionType) Enum() *CreateBackupEncryptionConfig_EncryptionType {
- p := new(CreateBackupEncryptionConfig_EncryptionType)
- *p = x
- return p
-}
-
-func (x CreateBackupEncryptionConfig_EncryptionType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (CreateBackupEncryptionConfig_EncryptionType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_backup_proto_enumTypes[1].Descriptor()
-}
-
-func (CreateBackupEncryptionConfig_EncryptionType) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_backup_proto_enumTypes[1]
-}
-
-func (x CreateBackupEncryptionConfig_EncryptionType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use CreateBackupEncryptionConfig_EncryptionType.Descriptor instead.
-func (CreateBackupEncryptionConfig_EncryptionType) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{13, 0}
-}
-
-// Encryption types for the backup.
-type CopyBackupEncryptionConfig_EncryptionType int32
-
-const (
- // Unspecified. Do not use.
- CopyBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED CopyBackupEncryptionConfig_EncryptionType = 0
- // This is the default option for
- // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
- // when
- // [encryption_config][google.spanner.admin.database.v1.CopyBackupEncryptionConfig]
- // is not specified. For example, if the source backup is using
- // `Customer_Managed_Encryption`, the backup will be using the same Cloud
- // KMS key as the source backup.
- CopyBackupEncryptionConfig_USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION CopyBackupEncryptionConfig_EncryptionType = 1
- // Use Google default encryption.
- CopyBackupEncryptionConfig_GOOGLE_DEFAULT_ENCRYPTION CopyBackupEncryptionConfig_EncryptionType = 2
- // Use customer managed encryption. If specified, either `kms_key_name` or
- // `kms_key_names` must contain valid Cloud KMS key(s).
- CopyBackupEncryptionConfig_CUSTOMER_MANAGED_ENCRYPTION CopyBackupEncryptionConfig_EncryptionType = 3
-)
-
-// Enum value maps for CopyBackupEncryptionConfig_EncryptionType.
-var (
- CopyBackupEncryptionConfig_EncryptionType_name = map[int32]string{
- 0: "ENCRYPTION_TYPE_UNSPECIFIED",
- 1: "USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION",
- 2: "GOOGLE_DEFAULT_ENCRYPTION",
- 3: "CUSTOMER_MANAGED_ENCRYPTION",
- }
- CopyBackupEncryptionConfig_EncryptionType_value = map[string]int32{
- "ENCRYPTION_TYPE_UNSPECIFIED": 0,
- "USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION": 1,
- "GOOGLE_DEFAULT_ENCRYPTION": 2,
- "CUSTOMER_MANAGED_ENCRYPTION": 3,
- }
-)
-
-func (x CopyBackupEncryptionConfig_EncryptionType) Enum() *CopyBackupEncryptionConfig_EncryptionType {
- p := new(CopyBackupEncryptionConfig_EncryptionType)
- *p = x
- return p
-}
-
-func (x CopyBackupEncryptionConfig_EncryptionType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (CopyBackupEncryptionConfig_EncryptionType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_backup_proto_enumTypes[2].Descriptor()
-}
-
-func (CopyBackupEncryptionConfig_EncryptionType) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_backup_proto_enumTypes[2]
-}
-
-func (x CopyBackupEncryptionConfig_EncryptionType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use CopyBackupEncryptionConfig_EncryptionType.Descriptor instead.
-func (CopyBackupEncryptionConfig_EncryptionType) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{14, 0}
-}
-
-// A backup of a Cloud Spanner database.
-type Backup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required for the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation. Name of the database from which this backup was created. This
- // needs to be in the same instance as the backup. Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>`.
- Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"`
- // The backup will contain an externally consistent copy of the database at
- // the timestamp specified by `version_time`. If `version_time` is not
- // specified, the system will set `version_time` to the `create_time` of the
- // backup.
- VersionTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=version_time,json=versionTime,proto3" json:"version_time,omitempty"`
- // Required for the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation. The expiration time of the backup, with microseconds
- // granularity that must be at least 6 hours and at most 366 days
- // from the time the CreateBackup request is processed. Once the `expire_time`
- // has passed, the backup is eligible to be automatically deleted by Cloud
- // Spanner to free the resources used by the backup.
- ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
- // Output only for the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation. Required for the
- // [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]
- // operation.
- //
- // A globally unique identifier for the backup which cannot be
- // changed. Values are of the form
- // `projects/<project>/instances/<instance>/backups/[a-z][a-z0-9_\-]*[a-z0-9]`
- // The final segment of the name must be between 2 and 60 characters
- // in length.
- //
- // The backup is stored in the location(s) specified in the instance
- // configuration of the instance containing the backup, identified
- // by the prefix of the backup name of the form
- // `projects/<project>/instances/<instance>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Output only. The time the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // request is received. If the request does not specify `version_time`, the
- // `version_time` of the backup will be equivalent to the `create_time`.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. Size of the backup in bytes.
- SizeBytes int64 `protobuf:"varint,5,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
- // Output only. The number of bytes that will be freed by deleting this
- // backup. This value will be zero if, for example, this backup is part of an
- // incremental backup chain and younger backups in the chain require that we
- // keep its data. For backups not in an incremental backup chain, this is
- // always the size of the backup. This value may change if backups on the same
- // chain get created, deleted or expired.
- FreeableSizeBytes int64 `protobuf:"varint,15,opt,name=freeable_size_bytes,json=freeableSizeBytes,proto3" json:"freeable_size_bytes,omitempty"`
- // Output only. For a backup in an incremental backup chain, this is the
- // storage space needed to keep the data that has changed since the previous
- // backup. For all other backups, this is always the size of the backup. This
- // value may change if backups on the same chain get deleted or expired.
- //
- // This field can be used to calculate the total storage space used by a set
- // of backups. For example, the total space used by all backups of a database
- // can be computed by summing up this field.
- ExclusiveSizeBytes int64 `protobuf:"varint,16,opt,name=exclusive_size_bytes,json=exclusiveSizeBytes,proto3" json:"exclusive_size_bytes,omitempty"`
- // Output only. The current state of the backup.
- State Backup_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.spanner.admin.database.v1.Backup_State" json:"state,omitempty"`
- // Output only. The names of the restored databases that reference the backup.
- // The database names are of
- // the form `projects/<project>/instances/<instance>/databases/<database>`.
- // Referencing databases may exist in different instances. The existence of
- // any referencing database prevents the backup from being deleted. When a
- // restored database from the backup enters the `READY` state, the reference
- // to the backup is removed.
- ReferencingDatabases []string `protobuf:"bytes,7,rep,name=referencing_databases,json=referencingDatabases,proto3" json:"referencing_databases,omitempty"`
- // Output only. The encryption information for the backup.
- EncryptionInfo *EncryptionInfo `protobuf:"bytes,8,opt,name=encryption_info,json=encryptionInfo,proto3" json:"encryption_info,omitempty"`
- // Output only. The encryption information for the backup, whether it is
- // protected by one or more KMS keys. The information includes all Cloud
- // KMS key versions used to encrypt the backup. The `encryption_status' field
- // inside of each `EncryptionInfo` is not populated. At least one of the key
- // versions must be available for the backup to be restored. If a key version
- // is revoked in the middle of a restore, the restore behavior is undefined.
- EncryptionInformation []*EncryptionInfo `protobuf:"bytes,13,rep,name=encryption_information,json=encryptionInformation,proto3" json:"encryption_information,omitempty"`
- // Output only. The database dialect information for the backup.
- DatabaseDialect DatabaseDialect `protobuf:"varint,10,opt,name=database_dialect,json=databaseDialect,proto3,enum=google.spanner.admin.database.v1.DatabaseDialect" json:"database_dialect,omitempty"`
- // Output only. The names of the destination backups being created by copying
- // this source backup. The backup names are of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- // Referencing backups may exist in different instances. The existence of
- // any referencing backup prevents the backup from being deleted. When the
- // copy operation is done (either successfully completed or cancelled or the
- // destination backup is deleted), the reference to the backup is removed.
- ReferencingBackups []string `protobuf:"bytes,11,rep,name=referencing_backups,json=referencingBackups,proto3" json:"referencing_backups,omitempty"`
- // Output only. The max allowed expiration time of the backup, with
- // microseconds granularity. A backup's expiration time can be configured in
- // multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or
- // copying an existing backup, the expiration time specified must be
- // less than `Backup.max_expire_time`.
- MaxExpireTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=max_expire_time,json=maxExpireTime,proto3" json:"max_expire_time,omitempty"`
- // Output only. List of backup schedule URIs that are associated with
- // creating this backup. This is only applicable for scheduled backups, and
- // is empty for on-demand backups.
- //
- // To optimize for storage, whenever possible, multiple schedules are
- // collapsed together to create one backup. In such cases, this field captures
- // the list of all backup schedule URIs that are associated with creating
- // this backup. If collapsing is not done, then this field captures the
- // single backup schedule URI associated with creating this backup.
- BackupSchedules []string `protobuf:"bytes,14,rep,name=backup_schedules,json=backupSchedules,proto3" json:"backup_schedules,omitempty"`
- // Output only. Populated only for backups in an incremental backup chain.
- // Backups share the same chain id if and only if they belong to the same
- // incremental backup chain. Use this field to determine which backups are
- // part of the same incremental backup chain. The ordering of backups in the
- // chain can be determined by ordering the backup `version_time`.
- IncrementalBackupChainId string `protobuf:"bytes,17,opt,name=incremental_backup_chain_id,json=incrementalBackupChainId,proto3" json:"incremental_backup_chain_id,omitempty"`
- // Output only. Data deleted at a time older than this is guaranteed not to be
- // retained in order to support this backup. For a backup in an incremental
- // backup chain, this is the version time of the oldest backup that exists or
- // ever existed in the chain. For all other backups, this is the version time
- // of the backup. This field can be used to understand what data is being
- // retained by the backup system.
- OldestVersionTime *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=oldest_version_time,json=oldestVersionTime,proto3" json:"oldest_version_time,omitempty"`
-}
-
-func (x *Backup) Reset() {
- *x = Backup{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Backup) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Backup) ProtoMessage() {}
-
-func (x *Backup) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Backup.ProtoReflect.Descriptor instead.
-func (*Backup) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Backup) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *Backup) GetVersionTime() *timestamppb.Timestamp {
- if x != nil {
- return x.VersionTime
- }
- return nil
-}
-
-func (x *Backup) GetExpireTime() *timestamppb.Timestamp {
- if x != nil {
- return x.ExpireTime
- }
- return nil
-}
-
-func (x *Backup) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Backup) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *Backup) GetSizeBytes() int64 {
- if x != nil {
- return x.SizeBytes
- }
- return 0
-}
-
-func (x *Backup) GetFreeableSizeBytes() int64 {
- if x != nil {
- return x.FreeableSizeBytes
- }
- return 0
-}
-
-func (x *Backup) GetExclusiveSizeBytes() int64 {
- if x != nil {
- return x.ExclusiveSizeBytes
- }
- return 0
-}
-
-func (x *Backup) GetState() Backup_State {
- if x != nil {
- return x.State
- }
- return Backup_STATE_UNSPECIFIED
-}
-
-func (x *Backup) GetReferencingDatabases() []string {
- if x != nil {
- return x.ReferencingDatabases
- }
- return nil
-}
-
-func (x *Backup) GetEncryptionInfo() *EncryptionInfo {
- if x != nil {
- return x.EncryptionInfo
- }
- return nil
-}
-
-func (x *Backup) GetEncryptionInformation() []*EncryptionInfo {
- if x != nil {
- return x.EncryptionInformation
- }
- return nil
-}
-
-func (x *Backup) GetDatabaseDialect() DatabaseDialect {
- if x != nil {
- return x.DatabaseDialect
- }
- return DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED
-}
-
-func (x *Backup) GetReferencingBackups() []string {
- if x != nil {
- return x.ReferencingBackups
- }
- return nil
-}
-
-func (x *Backup) GetMaxExpireTime() *timestamppb.Timestamp {
- if x != nil {
- return x.MaxExpireTime
- }
- return nil
-}
-
-func (x *Backup) GetBackupSchedules() []string {
- if x != nil {
- return x.BackupSchedules
- }
- return nil
-}
-
-func (x *Backup) GetIncrementalBackupChainId() string {
- if x != nil {
- return x.IncrementalBackupChainId
- }
- return ""
-}
-
-func (x *Backup) GetOldestVersionTime() *timestamppb.Timestamp {
- if x != nil {
- return x.OldestVersionTime
- }
- return nil
-}
-
-// The request for
-// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
-type CreateBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance in which the backup will be
- // created. This must be the same instance that contains the database the
- // backup will be created from. The backup will be stored in the
- // location(s) specified in the instance configuration of this
- // instance. Values are of the form
- // `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The id of the backup to be created. The `backup_id` appended to
- // `parent` forms the full backup name of the form
- // `projects/<project>/instances/<instance>/backups/<backup_id>`.
- BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"`
- // Required. The backup to create.
- Backup *Backup `protobuf:"bytes,3,opt,name=backup,proto3" json:"backup,omitempty"`
- // Optional. The encryption configuration used to encrypt the backup. If this
- // field is not specified, the backup will use the same encryption
- // configuration as the database by default, namely
- // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
- // = `USE_DATABASE_ENCRYPTION`.
- EncryptionConfig *CreateBackupEncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
-}
-
-func (x *CreateBackupRequest) Reset() {
- *x = CreateBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateBackupRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateBackupRequest) ProtoMessage() {}
-
-func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead.
-func (*CreateBackupRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *CreateBackupRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateBackupRequest) GetBackupId() string {
- if x != nil {
- return x.BackupId
- }
- return ""
-}
-
-func (x *CreateBackupRequest) GetBackup() *Backup {
- if x != nil {
- return x.Backup
- }
- return nil
-}
-
-func (x *CreateBackupRequest) GetEncryptionConfig() *CreateBackupEncryptionConfig {
- if x != nil {
- return x.EncryptionConfig
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup].
-type CreateBackupMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the backup being created.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The name of the database the backup is created from.
- Database string `protobuf:"bytes,2,opt,name=database,proto3" json:"database,omitempty"`
- // The progress of the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // operation.
- Progress *OperationProgress `protobuf:"bytes,3,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which cancellation of this operation was received.
- // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
- // starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not guaranteed.
- // Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a
- // [google.rpc.Status.code][google.rpc.Status.code] of 1,
- // corresponding to `Code.CANCELLED`.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
-}
-
-func (x *CreateBackupMetadata) Reset() {
- *x = CreateBackupMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateBackupMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateBackupMetadata) ProtoMessage() {}
-
-func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateBackupMetadata.ProtoReflect.Descriptor instead.
-func (*CreateBackupMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *CreateBackupMetadata) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *CreateBackupMetadata) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *CreateBackupMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *CreateBackupMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-// The request for
-// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
-type CopyBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the destination instance that will contain the backup
- // copy. Values are of the form: `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The id of the backup copy.
- // The `backup_id` appended to `parent` forms the full backup_uri of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- BackupId string `protobuf:"bytes,2,opt,name=backup_id,json=backupId,proto3" json:"backup_id,omitempty"`
- // Required. The source backup to be copied.
- // The source backup needs to be in READY state for it to be copied.
- // Once CopyBackup is in progress, the source backup cannot be deleted or
- // cleaned up on expiration until CopyBackup is finished.
- // Values are of the form:
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- SourceBackup string `protobuf:"bytes,3,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
- // Required. The expiration time of the backup in microsecond granularity.
- // The expiration time must be at least 6 hours and at most 366 days
- // from the `create_time` of the source backup. Once the `expire_time` has
- // passed, the backup is eligible to be automatically deleted by Cloud Spanner
- // to free the resources used by the backup.
- ExpireTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
- // Optional. The encryption configuration used to encrypt the backup. If this
- // field is not specified, the backup will use the same encryption
- // configuration as the source backup by default, namely
- // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
- // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
- EncryptionConfig *CopyBackupEncryptionConfig `protobuf:"bytes,5,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
-}
-
-func (x *CopyBackupRequest) Reset() {
- *x = CopyBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CopyBackupRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CopyBackupRequest) ProtoMessage() {}
-
-func (x *CopyBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CopyBackupRequest.ProtoReflect.Descriptor instead.
-func (*CopyBackupRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *CopyBackupRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CopyBackupRequest) GetBackupId() string {
- if x != nil {
- return x.BackupId
- }
- return ""
-}
-
-func (x *CopyBackupRequest) GetSourceBackup() string {
- if x != nil {
- return x.SourceBackup
- }
- return ""
-}
-
-func (x *CopyBackupRequest) GetExpireTime() *timestamppb.Timestamp {
- if x != nil {
- return x.ExpireTime
- }
- return nil
-}
-
-func (x *CopyBackupRequest) GetEncryptionConfig() *CopyBackupEncryptionConfig {
- if x != nil {
- return x.EncryptionConfig
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup].
-type CopyBackupMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the backup being created through the copy operation.
- // Values are of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The name of the source backup that is being copied.
- // Values are of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- SourceBackup string `protobuf:"bytes,2,opt,name=source_backup,json=sourceBackup,proto3" json:"source_backup,omitempty"`
- // The progress of the
- // [CopyBackup][google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup]
- // operation.
- Progress *OperationProgress `protobuf:"bytes,3,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which cancellation of CopyBackup operation was received.
- // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
- // starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not guaranteed.
- // Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a
- // [google.rpc.Status.code][google.rpc.Status.code] of 1,
- // corresponding to `Code.CANCELLED`.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
-}
-
-func (x *CopyBackupMetadata) Reset() {
- *x = CopyBackupMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CopyBackupMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CopyBackupMetadata) ProtoMessage() {}
-
-func (x *CopyBackupMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CopyBackupMetadata.ProtoReflect.Descriptor instead.
-func (*CopyBackupMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *CopyBackupMetadata) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *CopyBackupMetadata) GetSourceBackup() string {
- if x != nil {
- return x.SourceBackup
- }
- return ""
-}
-
-func (x *CopyBackupMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *CopyBackupMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-// The request for
-// [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup].
-type UpdateBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The backup to update. `backup.name`, and the fields to be updated
- // as specified by `update_mask` are required. Other fields are ignored.
- // Update is only supported for the following fields:
- // - `backup.expire_time`.
- Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"`
- // Required. A mask specifying which fields (e.g. `expire_time`) in the
- // Backup resource should be updated. This mask is relative to the Backup
- // resource, not to the request message. The field mask must always be
- // specified; this prevents any future fields from being erased accidentally
- // by clients that do not know about them.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateBackupRequest) Reset() {
- *x = UpdateBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateBackupRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateBackupRequest) ProtoMessage() {}
-
-func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateBackupRequest.ProtoReflect.Descriptor instead.
-func (*UpdateBackupRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *UpdateBackupRequest) GetBackup() *Backup {
- if x != nil {
- return x.Backup
- }
- return nil
-}
-
-func (x *UpdateBackupRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.UpdateMask
- }
- return nil
-}
-
-// The request for
-// [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup].
-type GetBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the backup.
- // Values are of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetBackupRequest) Reset() {
- *x = GetBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetBackupRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetBackupRequest) ProtoMessage() {}
-
-func (x *GetBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetBackupRequest.ProtoReflect.Descriptor instead.
-func (*GetBackupRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *GetBackupRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup].
-type DeleteBackupRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of the backup to delete.
- // Values are of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteBackupRequest) Reset() {
- *x = DeleteBackupRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteBackupRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteBackupRequest) ProtoMessage() {}
-
-func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead.
-func (*DeleteBackupRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *DeleteBackupRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
-type ListBackupsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance to list backups from. Values are of the
- // form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // An expression that filters the list of returned backups.
- //
- // A filter expression consists of a field name, a comparison operator, and a
- // value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the
- // [Backup][google.spanner.admin.database.v1.Backup] are eligible for
- // filtering:
- //
- // - `name`
- // - `database`
- // - `state`
- // - `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // - `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // - `version_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ)
- // - `size_bytes`
- // - `backup_schedules`
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic, but
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // - `name:Howl` - The backup's name contains the string "howl".
- // - `database:prod`
- // - The database's name contains the string "prod".
- // - `state:CREATING` - The backup is pending creation.
- // - `state:READY` - The backup is fully created and ready for use.
- // - `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`
- // - The backup name contains the string "howl" and `create_time`
- // of the backup is before 2018-03-28T14:50:00Z.
- // - `expire_time < \"2018-03-28T14:50:00Z\"`
- // - The backup `expire_time` is before 2018-03-28T14:50:00Z.
- // - `size_bytes > 10000000000` - The backup's size is greater than 10GB
- // - `backup_schedules:daily`
- // - The backup is created from a schedule with "daily" in its name.
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
- // Number of backups to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token]
- // from a previous
- // [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse]
- // to the same `parent` and with the same `filter`.
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListBackupsRequest) Reset() {
- *x = ListBackupsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListBackupsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListBackupsRequest) ProtoMessage() {}
-
-func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead.
-func (*ListBackupsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *ListBackupsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListBackupsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListBackupsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListBackupsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups].
-type ListBackupsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of matching backups. Backups returned are ordered by `create_time`
- // in descending order, starting from the most recent `create_time`.
- Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]
- // call to fetch more of the matching backups.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListBackupsResponse) Reset() {
- *x = ListBackupsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListBackupsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListBackupsResponse) ProtoMessage() {}
-
-func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead.
-func (*ListBackupsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *ListBackupsResponse) GetBackups() []*Backup {
- if x != nil {
- return x.Backups
- }
- return nil
-}
-
-func (x *ListBackupsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for
-// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
-type ListBackupOperationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance of the backup operations. Values are of
- // the form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // An expression that filters the list of returned backup operations.
- //
- // A filter expression consists of a field name, a
- // comparison operator, and a value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the [operation][google.longrunning.Operation]
- // are eligible for filtering:
- //
- // - `name` - The name of the long-running operation
- // - `done` - False if the operation is in progress, else true.
- // - `metadata.@type` - the type of metadata. For example, the type string
- // for
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
- // is
- // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`.
- // - `metadata.<field_name>` - any field in metadata.value.
- // `metadata.@type` must be specified first if filtering on metadata
- // fields.
- // - `error` - Error associated with the long-running operation.
- // - `response.@type` - the type of response.
- // - `response.<field_name>` - any field in response.value.
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic, but
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // - `done:true` - The operation is complete.
- // - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
- // `metadata.database:prod` - Returns operations where:
- // - The operation's metadata type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // - The source database name of backup contains the string "prod".
- // - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
- // `(metadata.name:howl) AND` \
- // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
- // `(error:*)` - Returns operations where:
- // - The operation's metadata type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // - The backup name contains the string "howl".
- // - The operation started before 2018-03-28T14:50:00Z.
- // - The operation resulted in an error.
- // - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata) AND` \
- // `(metadata.source_backup:test) AND` \
- // `(metadata.progress.start_time < \"2022-01-18T14:50:00Z\") AND` \
- // `(error:*)` - Returns operations where:
- // - The operation's metadata type is
- // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
- // - The source backup name contains the string "test".
- // - The operation started before 2022-01-18T14:50:00Z.
- // - The operation resulted in an error.
- // - `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND` \
- // `(metadata.database:test_db)) OR` \
- // `((metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CopyBackupMetadata)
- // AND` \
- // `(metadata.source_backup:test_bkp)) AND` \
- // `(error:*)` - Returns operations where:
- // - The operation's metadata matches either of criteria:
- // - The operation's metadata type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]
- // AND the source database name of the backup contains the string
- // "test_db"
- // - The operation's metadata type is
- // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]
- // AND the source backup name contains the string "test_bkp"
- // - The operation resulted in an error.
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
- // Number of operations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token]
- // from a previous
- // [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse]
- // to the same `parent` and with the same `filter`.
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListBackupOperationsRequest) Reset() {
- *x = ListBackupOperationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListBackupOperationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListBackupOperationsRequest) ProtoMessage() {}
-
-func (x *ListBackupOperationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListBackupOperationsRequest.ProtoReflect.Descriptor instead.
-func (*ListBackupOperationsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *ListBackupOperationsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListBackupOperationsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListBackupOperationsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListBackupOperationsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations].
-type ListBackupOperationsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of matching backup [long-running
- // operations][google.longrunning.Operation]. Each operation's name will be
- // prefixed by the backup's name. The operation's
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that are pending or have completed/failed/canceled within the
- // last 7 days. Operations returned are ordered by
- // `operation.metadata.value.progress.start_time` in descending order starting
- // from the most recently started operation.
- Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]
- // call to fetch more of the matching metadata.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListBackupOperationsResponse) Reset() {
- *x = ListBackupOperationsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListBackupOperationsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListBackupOperationsResponse) ProtoMessage() {}
-
-func (x *ListBackupOperationsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListBackupOperationsResponse.ProtoReflect.Descriptor instead.
-func (*ListBackupOperationsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *ListBackupOperationsResponse) GetOperations() []*longrunningpb.Operation {
- if x != nil {
- return x.Operations
- }
- return nil
-}
-
-func (x *ListBackupOperationsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// Information about a backup.
-type BackupInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Name of the backup.
- Backup string `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"`
- // The backup contains an externally consistent copy of `source_database` at
- // the timestamp specified by `version_time`. If the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // request did not specify `version_time`, the `version_time` of the backup is
- // equivalent to the `create_time`.
- VersionTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=version_time,json=versionTime,proto3" json:"version_time,omitempty"`
- // The time the
- // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]
- // request was received.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Name of the database the backup was created from.
- SourceDatabase string `protobuf:"bytes,3,opt,name=source_database,json=sourceDatabase,proto3" json:"source_database,omitempty"`
-}
-
-func (x *BackupInfo) Reset() {
- *x = BackupInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BackupInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BackupInfo) ProtoMessage() {}
-
-func (x *BackupInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BackupInfo.ProtoReflect.Descriptor instead.
-func (*BackupInfo) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *BackupInfo) GetBackup() string {
- if x != nil {
- return x.Backup
- }
- return ""
-}
-
-func (x *BackupInfo) GetVersionTime() *timestamppb.Timestamp {
- if x != nil {
- return x.VersionTime
- }
- return nil
-}
-
-func (x *BackupInfo) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *BackupInfo) GetSourceDatabase() string {
- if x != nil {
- return x.SourceDatabase
- }
- return ""
-}
-
-// Encryption configuration for the backup to create.
-type CreateBackupEncryptionConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The encryption type of the backup.
- EncryptionType CreateBackupEncryptionConfig_EncryptionType `protobuf:"varint,1,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.CreateBackupEncryptionConfig_EncryptionType" json:"encryption_type,omitempty"`
- // Optional. The Cloud KMS key that will be used to protect the backup.
- // This field should be set only when
- // [encryption_type][google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type]
- // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
- // Optional. Specifies the KMS configuration for the one or more keys used to
- // protect the backup. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- //
- // The keys referenced by kms_key_names must fully cover all
- // regions of the backup's instance configuration. Some examples:
- // * For single region instance configs, specify a single regional
- // location KMS key.
- // * For multi-regional instance configs of type GOOGLE_MANAGED,
- // either specify a multi-regional location KMS key or multiple regional
- // location KMS keys that cover all regions in the instance config.
- // * For an instance config of type USER_MANAGED, please specify only
- // regional location KMS keys to cover each region in the instance config.
- // Multi-regional location KMS keys are not supported for USER_MANAGED
- // instance configs.
- KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
-}
-
-func (x *CreateBackupEncryptionConfig) Reset() {
- *x = CreateBackupEncryptionConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateBackupEncryptionConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateBackupEncryptionConfig) ProtoMessage() {}
-
-func (x *CreateBackupEncryptionConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateBackupEncryptionConfig.ProtoReflect.Descriptor instead.
-func (*CreateBackupEncryptionConfig) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *CreateBackupEncryptionConfig) GetEncryptionType() CreateBackupEncryptionConfig_EncryptionType {
- if x != nil {
- return x.EncryptionType
- }
- return CreateBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED
-}
-
-func (x *CreateBackupEncryptionConfig) GetKmsKeyName() string {
- if x != nil {
- return x.KmsKeyName
- }
- return ""
-}
-
-func (x *CreateBackupEncryptionConfig) GetKmsKeyNames() []string {
- if x != nil {
- return x.KmsKeyNames
- }
- return nil
-}
-
-// Encryption configuration for the copied backup.
-type CopyBackupEncryptionConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The encryption type of the backup.
- EncryptionType CopyBackupEncryptionConfig_EncryptionType `protobuf:"varint,1,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.CopyBackupEncryptionConfig_EncryptionType" json:"encryption_type,omitempty"`
- // Optional. The Cloud KMS key that will be used to protect the backup.
- // This field should be set only when
- // [encryption_type][google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type]
- // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
- // Optional. Specifies the KMS configuration for the one or more keys used to
- // protect the backup. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- // Kms keys specified can be in any order.
- //
- // The keys referenced by kms_key_names must fully cover all
- // regions of the backup's instance configuration. Some examples:
- // * For single region instance configs, specify a single regional
- // location KMS key.
- // * For multi-regional instance configs of type GOOGLE_MANAGED,
- // either specify a multi-regional location KMS key or multiple regional
- // location KMS keys that cover all regions in the instance config.
- // * For an instance config of type USER_MANAGED, please specify only
- // regional location KMS keys to cover each region in the instance config.
- // Multi-regional location KMS keys are not supported for USER_MANAGED
- // instance configs.
- KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
-}
-
-func (x *CopyBackupEncryptionConfig) Reset() {
- *x = CopyBackupEncryptionConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CopyBackupEncryptionConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CopyBackupEncryptionConfig) ProtoMessage() {}
-
-func (x *CopyBackupEncryptionConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CopyBackupEncryptionConfig.ProtoReflect.Descriptor instead.
-func (*CopyBackupEncryptionConfig) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *CopyBackupEncryptionConfig) GetEncryptionType() CopyBackupEncryptionConfig_EncryptionType {
- if x != nil {
- return x.EncryptionType
- }
- return CopyBackupEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED
-}
-
-func (x *CopyBackupEncryptionConfig) GetKmsKeyName() string {
- if x != nil {
- return x.KmsKeyName
- }
- return ""
-}
-
-func (x *CopyBackupEncryptionConfig) GetKmsKeyNames() []string {
- if x != nil {
- return x.KmsKeyNames
- }
- return nil
-}
-
-// The specification for full backups.
-// A full backup stores the entire contents of the database at a given
-// version time.
-type FullBackupSpec struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *FullBackupSpec) Reset() {
- *x = FullBackupSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FullBackupSpec) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FullBackupSpec) ProtoMessage() {}
-
-func (x *FullBackupSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FullBackupSpec.ProtoReflect.Descriptor instead.
-func (*FullBackupSpec) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{15}
-}
-
-// The specification for incremental backup chains.
-// An incremental backup stores the delta of changes between a previous
-// backup and the database contents at a given version time. An
-// incremental backup chain consists of a full backup and zero or more
-// successive incremental backups. The first backup created for an
-// incremental backup chain is always a full backup.
-type IncrementalBackupSpec struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *IncrementalBackupSpec) Reset() {
- *x = IncrementalBackupSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *IncrementalBackupSpec) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*IncrementalBackupSpec) ProtoMessage() {}
-
-func (x *IncrementalBackupSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use IncrementalBackupSpec.ProtoReflect.Descriptor instead.
-func (*IncrementalBackupSpec) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP(), []int{16}
-}
-
-var File_google_spanner_admin_database_v1_backup_proto protoreflect.FileDescriptor
-
-var file_google_spanner_admin_database_v1_backup_proto_rawDesc = []byte{
- 0x0a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
- 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e,
- 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x0b, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12,
- 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
- 0x69, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65,
- 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x73, 0x69,
- 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x66, 0x72, 0x65, 0x65, 0x61,
- 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0f,
- 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x66, 0x72, 0x65, 0x65, 0x61,
- 0x62, 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x14,
- 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x12, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79,
- 0x74, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5c,
- 0x0a, 0x15, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x42, 0x27, 0xe0,
- 0x41, 0x03, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x14, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
- 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0x5e, 0x0a, 0x0f,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x6c, 0x0a, 0x16,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x15, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
- 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x0a,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x56, 0x0a,
- 0x13, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x03, 0xfa,
- 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x52, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x61,
- 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x47, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x78, 0x70,
- 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x0d, 0x6d, 0x61, 0x78, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x58,
- 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
- 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x03, 0xfa, 0x41, 0x27,
- 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x72,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x63,
- 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x4f, 0x0a, 0x13,
- 0x6f, 0x6c, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x11, 0x6f, 0x6c, 0x64, 0x65,
- 0x73, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x37, 0x0a,
- 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f,
- 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a,
- 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52,
- 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x5c, 0xea, 0x41, 0x59, 0x0a, 0x1d, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x38, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x62, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x7d, 0x22, 0xb1, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a,
- 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x64, 0x12,
- 0x45, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x70, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9e, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x12, 0x36, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21,
- 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x08, 0x70,
- 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65,
- 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b,
- 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63,
- 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf4, 0x02, 0x0a, 0x11, 0x43, 0x6f,
- 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x12, 0x20, 0x0a, 0x09, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x49, 0x64, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
- 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40,
- 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x6e, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
- 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x22, 0xa3, 0x02, 0x0a, 0x12, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x36, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x47, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67,
- 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52,
- 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e,
- 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45,
- 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
- 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x4d, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x61,
- 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
- 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa9, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73,
- 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
- 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67,
- 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
- 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x81, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x07,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73,
- 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb2, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73,
- 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21,
- 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
- 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x85, 0x01,
- 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d,
- 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67,
- 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a,
- 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x12, 0x3d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12,
- 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0f,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x0e, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0xc8, 0x03, 0x0a, 0x1c,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x0f,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x6b, 0x6d, 0x73,
- 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
- 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b,
- 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65,
- 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x29, 0xe0,
- 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52,
- 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50,
- 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x53, 0x45,
- 0x5f, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
- 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45,
- 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54,
- 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45,
- 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
- 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0xd4, 0x03, 0x0a, 0x1a, 0x43, 0x6f, 0x70, 0x79, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x4b,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x4b, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65,
- 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a,
- 0x0d, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52,
- 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a,
- 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
- 0x12, 0x2b, 0x0a, 0x27, 0x55, 0x53, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, 0x44,
- 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50,
- 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1d, 0x0a,
- 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f,
- 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b,
- 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44,
- 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0x10, 0x0a,
- 0x0e, 0x46, 0x75, 0x6c, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x22,
- 0x17, 0x0a, 0x15, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x61,
- 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x42, 0xfd, 0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x42, 0x0b, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x46, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69,
- 0x76, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56,
- 0x31, 0xca, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_admin_database_v1_backup_proto_rawDescOnce sync.Once
- file_google_spanner_admin_database_v1_backup_proto_rawDescData = file_google_spanner_admin_database_v1_backup_proto_rawDesc
-)
-
-func file_google_spanner_admin_database_v1_backup_proto_rawDescGZIP() []byte {
- file_google_spanner_admin_database_v1_backup_proto_rawDescOnce.Do(func() {
- file_google_spanner_admin_database_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_backup_proto_rawDescData)
- })
- return file_google_spanner_admin_database_v1_backup_proto_rawDescData
-}
-
-var file_google_spanner_admin_database_v1_backup_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_google_spanner_admin_database_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
-var file_google_spanner_admin_database_v1_backup_proto_goTypes = []any{
- (Backup_State)(0), // 0: google.spanner.admin.database.v1.Backup.State
- (CreateBackupEncryptionConfig_EncryptionType)(0), // 1: google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType
- (CopyBackupEncryptionConfig_EncryptionType)(0), // 2: google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType
- (*Backup)(nil), // 3: google.spanner.admin.database.v1.Backup
- (*CreateBackupRequest)(nil), // 4: google.spanner.admin.database.v1.CreateBackupRequest
- (*CreateBackupMetadata)(nil), // 5: google.spanner.admin.database.v1.CreateBackupMetadata
- (*CopyBackupRequest)(nil), // 6: google.spanner.admin.database.v1.CopyBackupRequest
- (*CopyBackupMetadata)(nil), // 7: google.spanner.admin.database.v1.CopyBackupMetadata
- (*UpdateBackupRequest)(nil), // 8: google.spanner.admin.database.v1.UpdateBackupRequest
- (*GetBackupRequest)(nil), // 9: google.spanner.admin.database.v1.GetBackupRequest
- (*DeleteBackupRequest)(nil), // 10: google.spanner.admin.database.v1.DeleteBackupRequest
- (*ListBackupsRequest)(nil), // 11: google.spanner.admin.database.v1.ListBackupsRequest
- (*ListBackupsResponse)(nil), // 12: google.spanner.admin.database.v1.ListBackupsResponse
- (*ListBackupOperationsRequest)(nil), // 13: google.spanner.admin.database.v1.ListBackupOperationsRequest
- (*ListBackupOperationsResponse)(nil), // 14: google.spanner.admin.database.v1.ListBackupOperationsResponse
- (*BackupInfo)(nil), // 15: google.spanner.admin.database.v1.BackupInfo
- (*CreateBackupEncryptionConfig)(nil), // 16: google.spanner.admin.database.v1.CreateBackupEncryptionConfig
- (*CopyBackupEncryptionConfig)(nil), // 17: google.spanner.admin.database.v1.CopyBackupEncryptionConfig
- (*FullBackupSpec)(nil), // 18: google.spanner.admin.database.v1.FullBackupSpec
- (*IncrementalBackupSpec)(nil), // 19: google.spanner.admin.database.v1.IncrementalBackupSpec
- (*timestamppb.Timestamp)(nil), // 20: google.protobuf.Timestamp
- (*EncryptionInfo)(nil), // 21: google.spanner.admin.database.v1.EncryptionInfo
- (DatabaseDialect)(0), // 22: google.spanner.admin.database.v1.DatabaseDialect
- (*OperationProgress)(nil), // 23: google.spanner.admin.database.v1.OperationProgress
- (*fieldmaskpb.FieldMask)(nil), // 24: google.protobuf.FieldMask
- (*longrunningpb.Operation)(nil), // 25: google.longrunning.Operation
-}
-var file_google_spanner_admin_database_v1_backup_proto_depIdxs = []int32{
- 20, // 0: google.spanner.admin.database.v1.Backup.version_time:type_name -> google.protobuf.Timestamp
- 20, // 1: google.spanner.admin.database.v1.Backup.expire_time:type_name -> google.protobuf.Timestamp
- 20, // 2: google.spanner.admin.database.v1.Backup.create_time:type_name -> google.protobuf.Timestamp
- 0, // 3: google.spanner.admin.database.v1.Backup.state:type_name -> google.spanner.admin.database.v1.Backup.State
- 21, // 4: google.spanner.admin.database.v1.Backup.encryption_info:type_name -> google.spanner.admin.database.v1.EncryptionInfo
- 21, // 5: google.spanner.admin.database.v1.Backup.encryption_information:type_name -> google.spanner.admin.database.v1.EncryptionInfo
- 22, // 6: google.spanner.admin.database.v1.Backup.database_dialect:type_name -> google.spanner.admin.database.v1.DatabaseDialect
- 20, // 7: google.spanner.admin.database.v1.Backup.max_expire_time:type_name -> google.protobuf.Timestamp
- 20, // 8: google.spanner.admin.database.v1.Backup.oldest_version_time:type_name -> google.protobuf.Timestamp
- 3, // 9: google.spanner.admin.database.v1.CreateBackupRequest.backup:type_name -> google.spanner.admin.database.v1.Backup
- 16, // 10: google.spanner.admin.database.v1.CreateBackupRequest.encryption_config:type_name -> google.spanner.admin.database.v1.CreateBackupEncryptionConfig
- 23, // 11: google.spanner.admin.database.v1.CreateBackupMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
- 20, // 12: google.spanner.admin.database.v1.CreateBackupMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 20, // 13: google.spanner.admin.database.v1.CopyBackupRequest.expire_time:type_name -> google.protobuf.Timestamp
- 17, // 14: google.spanner.admin.database.v1.CopyBackupRequest.encryption_config:type_name -> google.spanner.admin.database.v1.CopyBackupEncryptionConfig
- 23, // 15: google.spanner.admin.database.v1.CopyBackupMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
- 20, // 16: google.spanner.admin.database.v1.CopyBackupMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 3, // 17: google.spanner.admin.database.v1.UpdateBackupRequest.backup:type_name -> google.spanner.admin.database.v1.Backup
- 24, // 18: google.spanner.admin.database.v1.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask
- 3, // 19: google.spanner.admin.database.v1.ListBackupsResponse.backups:type_name -> google.spanner.admin.database.v1.Backup
- 25, // 20: google.spanner.admin.database.v1.ListBackupOperationsResponse.operations:type_name -> google.longrunning.Operation
- 20, // 21: google.spanner.admin.database.v1.BackupInfo.version_time:type_name -> google.protobuf.Timestamp
- 20, // 22: google.spanner.admin.database.v1.BackupInfo.create_time:type_name -> google.protobuf.Timestamp
- 1, // 23: google.spanner.admin.database.v1.CreateBackupEncryptionConfig.encryption_type:type_name -> google.spanner.admin.database.v1.CreateBackupEncryptionConfig.EncryptionType
- 2, // 24: google.spanner.admin.database.v1.CopyBackupEncryptionConfig.encryption_type:type_name -> google.spanner.admin.database.v1.CopyBackupEncryptionConfig.EncryptionType
- 25, // [25:25] is the sub-list for method output_type
- 25, // [25:25] is the sub-list for method input_type
- 25, // [25:25] is the sub-list for extension type_name
- 25, // [25:25] is the sub-list for extension extendee
- 0, // [0:25] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_admin_database_v1_backup_proto_init() }
-func file_google_spanner_admin_database_v1_backup_proto_init() {
- if File_google_spanner_admin_database_v1_backup_proto != nil {
- return
- }
- file_google_spanner_admin_database_v1_common_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Backup); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*CreateBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*CreateBackupMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*CopyBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*CopyBackupMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*GetBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteBackupRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupOperationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupOperationsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*BackupInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*CreateBackupEncryptionConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*CopyBackupEncryptionConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*FullBackupSpec); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*IncrementalBackupSpec); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_admin_database_v1_backup_proto_rawDesc,
- NumEnums: 3,
- NumMessages: 17,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_admin_database_v1_backup_proto_goTypes,
- DependencyIndexes: file_google_spanner_admin_database_v1_backup_proto_depIdxs,
- EnumInfos: file_google_spanner_admin_database_v1_backup_proto_enumTypes,
- MessageInfos: file_google_spanner_admin_database_v1_backup_proto_msgTypes,
- }.Build()
- File_google_spanner_admin_database_v1_backup_proto = out.File
- file_google_spanner_admin_database_v1_backup_proto_rawDesc = nil
- file_google_spanner_admin_database_v1_backup_proto_goTypes = nil
- file_google_spanner_admin_database_v1_backup_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go
deleted file mode 100644
index fd7ca6b0d..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/backup_schedule.pb.go
+++ /dev/null
@@ -1,1080 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/admin/database/v1/backup_schedule.proto
-
-package databasepb
-
-import (
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Defines specifications of the backup schedule.
-type BackupScheduleSpec struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required.
- //
- // Types that are assignable to ScheduleSpec:
- //
- // *BackupScheduleSpec_CronSpec
- ScheduleSpec isBackupScheduleSpec_ScheduleSpec `protobuf_oneof:"schedule_spec"`
-}
-
-func (x *BackupScheduleSpec) Reset() {
- *x = BackupScheduleSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BackupScheduleSpec) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BackupScheduleSpec) ProtoMessage() {}
-
-func (x *BackupScheduleSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BackupScheduleSpec.ProtoReflect.Descriptor instead.
-func (*BackupScheduleSpec) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{0}
-}
-
-func (m *BackupScheduleSpec) GetScheduleSpec() isBackupScheduleSpec_ScheduleSpec {
- if m != nil {
- return m.ScheduleSpec
- }
- return nil
-}
-
-func (x *BackupScheduleSpec) GetCronSpec() *CrontabSpec {
- if x, ok := x.GetScheduleSpec().(*BackupScheduleSpec_CronSpec); ok {
- return x.CronSpec
- }
- return nil
-}
-
-type isBackupScheduleSpec_ScheduleSpec interface {
- isBackupScheduleSpec_ScheduleSpec()
-}
-
-type BackupScheduleSpec_CronSpec struct {
- // Cron style schedule specification.
- CronSpec *CrontabSpec `protobuf:"bytes,1,opt,name=cron_spec,json=cronSpec,proto3,oneof"`
-}
-
-func (*BackupScheduleSpec_CronSpec) isBackupScheduleSpec_ScheduleSpec() {}
-
-// BackupSchedule expresses the automated backup creation specification for a
-// Spanner database.
-// Next ID: 10
-type BackupSchedule struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Identifier. Output only for the
- // [CreateBackupSchedule][DatabaseAdmin.CreateBackupSchededule] operation.
- // Required for the
- // [UpdateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule]
- // operation. A globally unique identifier for the backup schedule which
- // cannot be changed. Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/[a-z][a-z0-9_\-]*[a-z0-9]`
- // The final segment of the name must be between 2 and 60 characters in
- // length.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Optional. The schedule specification based on which the backup creations
- // are triggered.
- Spec *BackupScheduleSpec `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"`
- // Optional. The retention duration of a backup that must be at least 6 hours
- // and at most 366 days. The backup is eligible to be automatically deleted
- // once the retention period has elapsed.
- RetentionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=retention_duration,json=retentionDuration,proto3" json:"retention_duration,omitempty"`
- // Optional. The encryption configuration that will be used to encrypt the
- // backup. If this field is not specified, the backup will use the same
- // encryption configuration as the database.
- EncryptionConfig *CreateBackupEncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
- // Required. Backup type spec determines the type of backup that is created by
- // the backup schedule. Currently, only full backups are supported.
- //
- // Types that are assignable to BackupTypeSpec:
- //
- // *BackupSchedule_FullBackupSpec
- // *BackupSchedule_IncrementalBackupSpec
- BackupTypeSpec isBackupSchedule_BackupTypeSpec `protobuf_oneof:"backup_type_spec"`
- // Output only. The timestamp at which the schedule was last updated.
- // If the schedule has never been updated, this field contains the timestamp
- // when the schedule was first created.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
-}
-
-func (x *BackupSchedule) Reset() {
- *x = BackupSchedule{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BackupSchedule) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BackupSchedule) ProtoMessage() {}
-
-func (x *BackupSchedule) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BackupSchedule.ProtoReflect.Descriptor instead.
-func (*BackupSchedule) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *BackupSchedule) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *BackupSchedule) GetSpec() *BackupScheduleSpec {
- if x != nil {
- return x.Spec
- }
- return nil
-}
-
-func (x *BackupSchedule) GetRetentionDuration() *durationpb.Duration {
- if x != nil {
- return x.RetentionDuration
- }
- return nil
-}
-
-func (x *BackupSchedule) GetEncryptionConfig() *CreateBackupEncryptionConfig {
- if x != nil {
- return x.EncryptionConfig
- }
- return nil
-}
-
-func (m *BackupSchedule) GetBackupTypeSpec() isBackupSchedule_BackupTypeSpec {
- if m != nil {
- return m.BackupTypeSpec
- }
- return nil
-}
-
-func (x *BackupSchedule) GetFullBackupSpec() *FullBackupSpec {
- if x, ok := x.GetBackupTypeSpec().(*BackupSchedule_FullBackupSpec); ok {
- return x.FullBackupSpec
- }
- return nil
-}
-
-func (x *BackupSchedule) GetIncrementalBackupSpec() *IncrementalBackupSpec {
- if x, ok := x.GetBackupTypeSpec().(*BackupSchedule_IncrementalBackupSpec); ok {
- return x.IncrementalBackupSpec
- }
- return nil
-}
-
-func (x *BackupSchedule) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-type isBackupSchedule_BackupTypeSpec interface {
- isBackupSchedule_BackupTypeSpec()
-}
-
-type BackupSchedule_FullBackupSpec struct {
- // The schedule creates only full backups.
- FullBackupSpec *FullBackupSpec `protobuf:"bytes,7,opt,name=full_backup_spec,json=fullBackupSpec,proto3,oneof"`
-}
-
-type BackupSchedule_IncrementalBackupSpec struct {
- // The schedule creates incremental backup chains.
- IncrementalBackupSpec *IncrementalBackupSpec `protobuf:"bytes,8,opt,name=incremental_backup_spec,json=incrementalBackupSpec,proto3,oneof"`
-}
-
-func (*BackupSchedule_FullBackupSpec) isBackupSchedule_BackupTypeSpec() {}
-
-func (*BackupSchedule_IncrementalBackupSpec) isBackupSchedule_BackupTypeSpec() {}
-
-// CrontabSpec can be used to specify the version time and frequency at
-// which the backup should be created.
-type CrontabSpec struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Textual representation of the crontab. User can customize the
- // backup frequency and the backup version time using the cron
- // expression. The version time must be in UTC timzeone.
- //
- // The backup will contain an externally consistent copy of the
- // database at the version time. Allowed frequencies are 12 hour, 1 day,
- // 1 week and 1 month. Examples of valid cron specifications:
- // - `0 2/12 * * * ` : every 12 hours at (2, 14) hours past midnight in UTC.
- // - `0 2,14 * * * ` : every 12 hours at (2,14) hours past midnight in UTC.
- // - `0 2 * * * ` : once a day at 2 past midnight in UTC.
- // - `0 2 * * 0 ` : once a week every Sunday at 2 past midnight in UTC.
- // - `0 2 8 * * ` : once a month on 8th day at 2 past midnight in UTC.
- Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
- // Output only. The time zone of the times in `CrontabSpec.text`. Currently
- // only UTC is supported.
- TimeZone string `protobuf:"bytes,2,opt,name=time_zone,json=timeZone,proto3" json:"time_zone,omitempty"`
- // Output only. Schedule backups will contain an externally consistent copy
- // of the database at the version time specified in
- // `schedule_spec.cron_spec`. However, Spanner may not initiate the creation
- // of the scheduled backups at that version time. Spanner will initiate
- // the creation of scheduled backups within the time window bounded by the
- // version_time specified in `schedule_spec.cron_spec` and version_time +
- // `creation_window`.
- CreationWindow *durationpb.Duration `protobuf:"bytes,3,opt,name=creation_window,json=creationWindow,proto3" json:"creation_window,omitempty"`
-}
-
-func (x *CrontabSpec) Reset() {
- *x = CrontabSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CrontabSpec) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CrontabSpec) ProtoMessage() {}
-
-func (x *CrontabSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CrontabSpec.ProtoReflect.Descriptor instead.
-func (*CrontabSpec) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *CrontabSpec) GetText() string {
- if x != nil {
- return x.Text
- }
- return ""
-}
-
-func (x *CrontabSpec) GetTimeZone() string {
- if x != nil {
- return x.TimeZone
- }
- return ""
-}
-
-func (x *CrontabSpec) GetCreationWindow() *durationpb.Duration {
- if x != nil {
- return x.CreationWindow
- }
- return nil
-}
-
-// The request for
-// [CreateBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule].
-type CreateBackupScheduleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the database that this backup schedule applies to.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The Id to use for the backup schedule. The `backup_schedule_id`
- // appended to `parent` forms the full backup schedule name of the form
- // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
- BackupScheduleId string `protobuf:"bytes,2,opt,name=backup_schedule_id,json=backupScheduleId,proto3" json:"backup_schedule_id,omitempty"`
- // Required. The backup schedule to create.
- BackupSchedule *BackupSchedule `protobuf:"bytes,3,opt,name=backup_schedule,json=backupSchedule,proto3" json:"backup_schedule,omitempty"`
-}
-
-func (x *CreateBackupScheduleRequest) Reset() {
- *x = CreateBackupScheduleRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateBackupScheduleRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateBackupScheduleRequest) ProtoMessage() {}
-
-func (x *CreateBackupScheduleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateBackupScheduleRequest.ProtoReflect.Descriptor instead.
-func (*CreateBackupScheduleRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *CreateBackupScheduleRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateBackupScheduleRequest) GetBackupScheduleId() string {
- if x != nil {
- return x.BackupScheduleId
- }
- return ""
-}
-
-func (x *CreateBackupScheduleRequest) GetBackupSchedule() *BackupSchedule {
- if x != nil {
- return x.BackupSchedule
- }
- return nil
-}
-
-// The request for
-// [GetBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule].
-type GetBackupScheduleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the schedule to retrieve.
- // Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetBackupScheduleRequest) Reset() {
- *x = GetBackupScheduleRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetBackupScheduleRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetBackupScheduleRequest) ProtoMessage() {}
-
-func (x *GetBackupScheduleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetBackupScheduleRequest.ProtoReflect.Descriptor instead.
-func (*GetBackupScheduleRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *GetBackupScheduleRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [DeleteBackupSchedule][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule].
-type DeleteBackupScheduleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the schedule to delete.
- // Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>/backupSchedules/<backup_schedule_id>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteBackupScheduleRequest) Reset() {
- *x = DeleteBackupScheduleRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteBackupScheduleRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteBackupScheduleRequest) ProtoMessage() {}
-
-func (x *DeleteBackupScheduleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteBackupScheduleRequest.ProtoReflect.Descriptor instead.
-func (*DeleteBackupScheduleRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *DeleteBackupScheduleRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
-type ListBackupSchedulesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Database is the parent resource whose backup schedules should be
- // listed. Values are of the form
- // projects/<project>/instances/<instance>/databases/<database>
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Optional. Number of backup schedules to be returned in the response. If 0
- // or less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // Optional. If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListBackupSchedulesResponse.next_page_token]
- // from a previous
- // [ListBackupSchedulesResponse][google.spanner.admin.database.v1.ListBackupSchedulesResponse]
- // to the same `parent`.
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListBackupSchedulesRequest) Reset() {
- *x = ListBackupSchedulesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListBackupSchedulesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListBackupSchedulesRequest) ProtoMessage() {}
-
-func (x *ListBackupSchedulesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListBackupSchedulesRequest.ProtoReflect.Descriptor instead.
-func (*ListBackupSchedulesRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *ListBackupSchedulesRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListBackupSchedulesRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListBackupSchedulesRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules].
-type ListBackupSchedulesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of backup schedules for a database.
- BackupSchedules []*BackupSchedule `protobuf:"bytes,1,rep,name=backup_schedules,json=backupSchedules,proto3" json:"backup_schedules,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListBackupSchedules][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules]
- // call to fetch more of the schedules.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListBackupSchedulesResponse) Reset() {
- *x = ListBackupSchedulesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListBackupSchedulesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListBackupSchedulesResponse) ProtoMessage() {}
-
-func (x *ListBackupSchedulesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListBackupSchedulesResponse.ProtoReflect.Descriptor instead.
-func (*ListBackupSchedulesResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *ListBackupSchedulesResponse) GetBackupSchedules() []*BackupSchedule {
- if x != nil {
- return x.BackupSchedules
- }
- return nil
-}
-
-func (x *ListBackupSchedulesResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for
-// [UpdateBackupScheduleRequest][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule].
-type UpdateBackupScheduleRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The backup schedule to update. `backup_schedule.name`, and the
- // fields to be updated as specified by `update_mask` are required. Other
- // fields are ignored.
- BackupSchedule *BackupSchedule `protobuf:"bytes,1,opt,name=backup_schedule,json=backupSchedule,proto3" json:"backup_schedule,omitempty"`
- // Required. A mask specifying which fields in the BackupSchedule resource
- // should be updated. This mask is relative to the BackupSchedule resource,
- // not to the request message. The field mask must always be
- // specified; this prevents any future fields from being erased
- // accidentally.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateBackupScheduleRequest) Reset() {
- *x = UpdateBackupScheduleRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateBackupScheduleRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateBackupScheduleRequest) ProtoMessage() {}
-
-func (x *UpdateBackupScheduleRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateBackupScheduleRequest.ProtoReflect.Descriptor instead.
-func (*UpdateBackupScheduleRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *UpdateBackupScheduleRequest) GetBackupSchedule() *BackupSchedule {
- if x != nil {
- return x.BackupSchedule
- }
- return nil
-}
-
-func (x *UpdateBackupScheduleRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.UpdateMask
- }
- return nil
-}
-
-var File_google_spanner_admin_database_v1_backup_schedule_proto protoreflect.FileDescriptor
-
-var file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc = []byte{
- 0x0a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
- 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68,
- 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61,
- 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x12, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4c,
- 0x0a, 0x09, 0x63, 0x72, 0x6f, 0x6e, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x53, 0x70, 0x65, 0x63,
- 0x48, 0x00, 0x52, 0x08, 0x63, 0x72, 0x6f, 0x6e, 0x53, 0x70, 0x65, 0x63, 0x42, 0x0f, 0x0a, 0x0d,
- 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x88, 0x06,
- 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
- 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x73, 0x70, 0x65,
- 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x70, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5c, 0x0a, 0x10, 0x66, 0x75, 0x6c,
- 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0e, 0x66, 0x75, 0x6c, 0x6c, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x71, 0x0a, 0x17, 0x69, 0x6e, 0x63, 0x72, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x70,
- 0x65, 0x63, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x72,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65,
- 0x63, 0x48, 0x00, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x3a, 0xa5, 0x01, 0xea,
- 0x41, 0xa1, 0x01, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x57, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x7b, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75,
- 0x6c, 0x65, 0x7d, 0x2a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64,
- 0x75, 0x6c, 0x65, 0x73, 0x32, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65,
- 0x64, 0x75, 0x6c, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x22, 0x91, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x6f,
- 0x6e, 0x74, 0x61, 0x62, 0x53, 0x70, 0x65, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x74, 0x65, 0x78,
- 0x74, 0x12, 0x20, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x5a,
- 0x6f, 0x6e, 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x72,
- 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x22, 0xf1, 0x01, 0x0a,
- 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a,
- 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
- 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x49, 0x64,
- 0x12, 0x5e, 0x0a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64,
- 0x75, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
- 0x22, 0x5d, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
- 0x60, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41,
- 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
- 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73, 0x74,
- 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64,
- 0x75, 0x6c, 0x65, 0x52, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64,
- 0x75, 0x6c, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
- 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e,
- 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xbf, 0x01, 0x0a,
- 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5e, 0x0a, 0x0f,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x62, 0x61,
- 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b,
- 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x85,
- 0x02, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31,
- 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56, 0x31, 0xca,
- 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescOnce sync.Once
- file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData = file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc
-)
-
-func file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescGZIP() []byte {
- file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescOnce.Do(func() {
- file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData)
- })
- return file_google_spanner_admin_database_v1_backup_schedule_proto_rawDescData
-}
-
-var file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_google_spanner_admin_database_v1_backup_schedule_proto_goTypes = []any{
- (*BackupScheduleSpec)(nil), // 0: google.spanner.admin.database.v1.BackupScheduleSpec
- (*BackupSchedule)(nil), // 1: google.spanner.admin.database.v1.BackupSchedule
- (*CrontabSpec)(nil), // 2: google.spanner.admin.database.v1.CrontabSpec
- (*CreateBackupScheduleRequest)(nil), // 3: google.spanner.admin.database.v1.CreateBackupScheduleRequest
- (*GetBackupScheduleRequest)(nil), // 4: google.spanner.admin.database.v1.GetBackupScheduleRequest
- (*DeleteBackupScheduleRequest)(nil), // 5: google.spanner.admin.database.v1.DeleteBackupScheduleRequest
- (*ListBackupSchedulesRequest)(nil), // 6: google.spanner.admin.database.v1.ListBackupSchedulesRequest
- (*ListBackupSchedulesResponse)(nil), // 7: google.spanner.admin.database.v1.ListBackupSchedulesResponse
- (*UpdateBackupScheduleRequest)(nil), // 8: google.spanner.admin.database.v1.UpdateBackupScheduleRequest
- (*durationpb.Duration)(nil), // 9: google.protobuf.Duration
- (*CreateBackupEncryptionConfig)(nil), // 10: google.spanner.admin.database.v1.CreateBackupEncryptionConfig
- (*FullBackupSpec)(nil), // 11: google.spanner.admin.database.v1.FullBackupSpec
- (*IncrementalBackupSpec)(nil), // 12: google.spanner.admin.database.v1.IncrementalBackupSpec
- (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
- (*fieldmaskpb.FieldMask)(nil), // 14: google.protobuf.FieldMask
-}
-var file_google_spanner_admin_database_v1_backup_schedule_proto_depIdxs = []int32{
- 2, // 0: google.spanner.admin.database.v1.BackupScheduleSpec.cron_spec:type_name -> google.spanner.admin.database.v1.CrontabSpec
- 0, // 1: google.spanner.admin.database.v1.BackupSchedule.spec:type_name -> google.spanner.admin.database.v1.BackupScheduleSpec
- 9, // 2: google.spanner.admin.database.v1.BackupSchedule.retention_duration:type_name -> google.protobuf.Duration
- 10, // 3: google.spanner.admin.database.v1.BackupSchedule.encryption_config:type_name -> google.spanner.admin.database.v1.CreateBackupEncryptionConfig
- 11, // 4: google.spanner.admin.database.v1.BackupSchedule.full_backup_spec:type_name -> google.spanner.admin.database.v1.FullBackupSpec
- 12, // 5: google.spanner.admin.database.v1.BackupSchedule.incremental_backup_spec:type_name -> google.spanner.admin.database.v1.IncrementalBackupSpec
- 13, // 6: google.spanner.admin.database.v1.BackupSchedule.update_time:type_name -> google.protobuf.Timestamp
- 9, // 7: google.spanner.admin.database.v1.CrontabSpec.creation_window:type_name -> google.protobuf.Duration
- 1, // 8: google.spanner.admin.database.v1.CreateBackupScheduleRequest.backup_schedule:type_name -> google.spanner.admin.database.v1.BackupSchedule
- 1, // 9: google.spanner.admin.database.v1.ListBackupSchedulesResponse.backup_schedules:type_name -> google.spanner.admin.database.v1.BackupSchedule
- 1, // 10: google.spanner.admin.database.v1.UpdateBackupScheduleRequest.backup_schedule:type_name -> google.spanner.admin.database.v1.BackupSchedule
- 14, // 11: google.spanner.admin.database.v1.UpdateBackupScheduleRequest.update_mask:type_name -> google.protobuf.FieldMask
- 12, // [12:12] is the sub-list for method output_type
- 12, // [12:12] is the sub-list for method input_type
- 12, // [12:12] is the sub-list for extension type_name
- 12, // [12:12] is the sub-list for extension extendee
- 0, // [0:12] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_admin_database_v1_backup_schedule_proto_init() }
-func file_google_spanner_admin_database_v1_backup_schedule_proto_init() {
- if File_google_spanner_admin_database_v1_backup_schedule_proto != nil {
- return
- }
- file_google_spanner_admin_database_v1_backup_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*BackupScheduleSpec); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*BackupSchedule); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*CrontabSpec); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*CreateBackupScheduleRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*GetBackupScheduleRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteBackupScheduleRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupSchedulesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*ListBackupSchedulesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateBackupScheduleRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[0].OneofWrappers = []any{
- (*BackupScheduleSpec_CronSpec)(nil),
- }
- file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes[1].OneofWrappers = []any{
- (*BackupSchedule_FullBackupSpec)(nil),
- (*BackupSchedule_IncrementalBackupSpec)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 9,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_admin_database_v1_backup_schedule_proto_goTypes,
- DependencyIndexes: file_google_spanner_admin_database_v1_backup_schedule_proto_depIdxs,
- MessageInfos: file_google_spanner_admin_database_v1_backup_schedule_proto_msgTypes,
- }.Build()
- File_google_spanner_admin_database_v1_backup_schedule_proto = out.File
- file_google_spanner_admin_database_v1_backup_schedule_proto_rawDesc = nil
- file_google_spanner_admin_database_v1_backup_schedule_proto_goTypes = nil
- file_google_spanner_admin_database_v1_backup_schedule_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go
deleted file mode 100644
index ba93d9a26..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/common.pb.go
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/admin/database/v1/common.proto
-
-package databasepb
-
-import (
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- status "google.golang.org/genproto/googleapis/rpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Indicates the dialect type of a database.
-type DatabaseDialect int32
-
-const (
- // Default value. This value will create a database with the
- // GOOGLE_STANDARD_SQL dialect.
- DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED DatabaseDialect = 0
- // GoogleSQL supported SQL.
- DatabaseDialect_GOOGLE_STANDARD_SQL DatabaseDialect = 1
- // PostgreSQL supported SQL.
- DatabaseDialect_POSTGRESQL DatabaseDialect = 2
-)
-
-// Enum value maps for DatabaseDialect.
-var (
- DatabaseDialect_name = map[int32]string{
- 0: "DATABASE_DIALECT_UNSPECIFIED",
- 1: "GOOGLE_STANDARD_SQL",
- 2: "POSTGRESQL",
- }
- DatabaseDialect_value = map[string]int32{
- "DATABASE_DIALECT_UNSPECIFIED": 0,
- "GOOGLE_STANDARD_SQL": 1,
- "POSTGRESQL": 2,
- }
-)
-
-func (x DatabaseDialect) Enum() *DatabaseDialect {
- p := new(DatabaseDialect)
- *p = x
- return p
-}
-
-func (x DatabaseDialect) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (DatabaseDialect) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_common_proto_enumTypes[0].Descriptor()
-}
-
-func (DatabaseDialect) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_common_proto_enumTypes[0]
-}
-
-func (x DatabaseDialect) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use DatabaseDialect.Descriptor instead.
-func (DatabaseDialect) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{0}
-}
-
-// Possible encryption types.
-type EncryptionInfo_Type int32
-
-const (
- // Encryption type was not specified, though data at rest remains encrypted.
- EncryptionInfo_TYPE_UNSPECIFIED EncryptionInfo_Type = 0
- // The data is encrypted at rest with a key that is
- // fully managed by Google. No key version or status will be populated.
- // This is the default state.
- EncryptionInfo_GOOGLE_DEFAULT_ENCRYPTION EncryptionInfo_Type = 1
- // The data is encrypted at rest with a key that is
- // managed by the customer. The active version of the key. `kms_key_version`
- // will be populated, and `encryption_status` may be populated.
- EncryptionInfo_CUSTOMER_MANAGED_ENCRYPTION EncryptionInfo_Type = 2
-)
-
-// Enum value maps for EncryptionInfo_Type.
-var (
- EncryptionInfo_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "GOOGLE_DEFAULT_ENCRYPTION",
- 2: "CUSTOMER_MANAGED_ENCRYPTION",
- }
- EncryptionInfo_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "GOOGLE_DEFAULT_ENCRYPTION": 1,
- "CUSTOMER_MANAGED_ENCRYPTION": 2,
- }
-)
-
-func (x EncryptionInfo_Type) Enum() *EncryptionInfo_Type {
- p := new(EncryptionInfo_Type)
- *p = x
- return p
-}
-
-func (x EncryptionInfo_Type) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (EncryptionInfo_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_common_proto_enumTypes[1].Descriptor()
-}
-
-func (EncryptionInfo_Type) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_common_proto_enumTypes[1]
-}
-
-func (x EncryptionInfo_Type) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use EncryptionInfo_Type.Descriptor instead.
-func (EncryptionInfo_Type) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{2, 0}
-}
-
-// Encapsulates progress related information for a Cloud Spanner long
-// running operation.
-type OperationProgress struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Percent completion of the operation.
- // Values are between 0 and 100 inclusive.
- ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
- // Time the request was received.
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // If set, the time at which this operation failed or was completed
- // successfully.
- EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
-}
-
-func (x *OperationProgress) Reset() {
- *x = OperationProgress{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OperationProgress) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OperationProgress) ProtoMessage() {}
-
-func (x *OperationProgress) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OperationProgress.ProtoReflect.Descriptor instead.
-func (*OperationProgress) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *OperationProgress) GetProgressPercent() int32 {
- if x != nil {
- return x.ProgressPercent
- }
- return 0
-}
-
-func (x *OperationProgress) GetStartTime() *timestamppb.Timestamp {
- if x != nil {
- return x.StartTime
- }
- return nil
-}
-
-func (x *OperationProgress) GetEndTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EndTime
- }
- return nil
-}
-
-// Encryption configuration for a Cloud Spanner database.
-type EncryptionConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The Cloud KMS key to be used for encrypting and decrypting
- // the database. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
- // Specifies the KMS configuration for the one or more keys used to encrypt
- // the database. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- //
- // The keys referenced by kms_key_names must fully cover all
- // regions of the database instance configuration. Some examples:
- // * For single region database instance configs, specify a single regional
- // location KMS key.
- // * For multi-regional database instance configs of type GOOGLE_MANAGED,
- // either specify a multi-regional location KMS key or multiple regional
- // location KMS keys that cover all regions in the instance config.
- // * For a database instance config of type USER_MANAGED, please specify only
- // regional location KMS keys to cover each region in the instance config.
- // Multi-regional location KMS keys are not supported for USER_MANAGED
- // instance configs.
- KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
-}
-
-func (x *EncryptionConfig) Reset() {
- *x = EncryptionConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EncryptionConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EncryptionConfig) ProtoMessage() {}
-
-func (x *EncryptionConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EncryptionConfig.ProtoReflect.Descriptor instead.
-func (*EncryptionConfig) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *EncryptionConfig) GetKmsKeyName() string {
- if x != nil {
- return x.KmsKeyName
- }
- return ""
-}
-
-func (x *EncryptionConfig) GetKmsKeyNames() []string {
- if x != nil {
- return x.KmsKeyNames
- }
- return nil
-}
-
-// Encryption information for a Cloud Spanner database or backup.
-type EncryptionInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Output only. The type of encryption.
- EncryptionType EncryptionInfo_Type `protobuf:"varint,3,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.EncryptionInfo_Type" json:"encryption_type,omitempty"`
- // Output only. If present, the status of a recent encrypt/decrypt call on
- // underlying data for this database or backup. Regardless of status, data is
- // always encrypted at rest.
- EncryptionStatus *status.Status `protobuf:"bytes,4,opt,name=encryption_status,json=encryptionStatus,proto3" json:"encryption_status,omitempty"`
- // Output only. A Cloud KMS key version that is being used to protect the
- // database or backup.
- KmsKeyVersion string `protobuf:"bytes,2,opt,name=kms_key_version,json=kmsKeyVersion,proto3" json:"kms_key_version,omitempty"`
-}
-
-func (x *EncryptionInfo) Reset() {
- *x = EncryptionInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *EncryptionInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EncryptionInfo) ProtoMessage() {}
-
-func (x *EncryptionInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_common_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EncryptionInfo.ProtoReflect.Descriptor instead.
-func (*EncryptionInfo) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_common_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *EncryptionInfo) GetEncryptionType() EncryptionInfo_Type {
- if x != nil {
- return x.EncryptionType
- }
- return EncryptionInfo_TYPE_UNSPECIFIED
-}
-
-func (x *EncryptionInfo) GetEncryptionStatus() *status.Status {
- if x != nil {
- return x.EncryptionStatus
- }
- return nil
-}
-
-func (x *EncryptionInfo) GetKmsKeyVersion() string {
- if x != nil {
- return x.KmsKeyVersion
- }
- return ""
-}
-
-var File_google_spanner_admin_database_v1_common_proto protoreflect.FileDescriptor
-
-var file_google_spanner_admin_database_v1_common_proto_rawDesc = []byte{
- 0x0a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
- 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb0, 0x01, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a,
- 0x10, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73,
- 0x73, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72,
- 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54,
- 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xa8, 0x01, 0x0a, 0x10, 0x45,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x48, 0x0a, 0x0c, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b,
- 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x6b, 0x6d, 0x73,
- 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
- 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xf3, 0x02, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x63, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
- 0x6e, 0x66, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a,
- 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x58, 0x0a, 0x0f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41,
- 0x03, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d,
- 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a,
- 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e,
- 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x47,
- 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e,
- 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55,
- 0x53, 0x54, 0x4f, 0x4d, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45,
- 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x2a, 0x5c, 0x0a, 0x0f, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x20,
- 0x0a, 0x1c, 0x44, 0x41, 0x54, 0x41, 0x42, 0x41, 0x53, 0x45, 0x5f, 0x44, 0x49, 0x41, 0x4c, 0x45,
- 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
- 0x12, 0x17, 0x0a, 0x13, 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x4e, 0x44,
- 0x41, 0x52, 0x44, 0x5f, 0x53, 0x51, 0x4c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x4f, 0x53,
- 0x54, 0x47, 0x52, 0x45, 0x53, 0x51, 0x4c, 0x10, 0x02, 0x42, 0xa2, 0x04, 0xea, 0x41, 0x78, 0x0a,
- 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
- 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52,
- 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0xea, 0x41, 0xa6, 0x01, 0x0a, 0x28, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x7a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b,
- 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e,
- 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x7d, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02,
- 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41,
- 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31,
- 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e,
- 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_admin_database_v1_common_proto_rawDescOnce sync.Once
- file_google_spanner_admin_database_v1_common_proto_rawDescData = file_google_spanner_admin_database_v1_common_proto_rawDesc
-)
-
-func file_google_spanner_admin_database_v1_common_proto_rawDescGZIP() []byte {
- file_google_spanner_admin_database_v1_common_proto_rawDescOnce.Do(func() {
- file_google_spanner_admin_database_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_common_proto_rawDescData)
- })
- return file_google_spanner_admin_database_v1_common_proto_rawDescData
-}
-
-var file_google_spanner_admin_database_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_spanner_admin_database_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_google_spanner_admin_database_v1_common_proto_goTypes = []any{
- (DatabaseDialect)(0), // 0: google.spanner.admin.database.v1.DatabaseDialect
- (EncryptionInfo_Type)(0), // 1: google.spanner.admin.database.v1.EncryptionInfo.Type
- (*OperationProgress)(nil), // 2: google.spanner.admin.database.v1.OperationProgress
- (*EncryptionConfig)(nil), // 3: google.spanner.admin.database.v1.EncryptionConfig
- (*EncryptionInfo)(nil), // 4: google.spanner.admin.database.v1.EncryptionInfo
- (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp
- (*status.Status)(nil), // 6: google.rpc.Status
-}
-var file_google_spanner_admin_database_v1_common_proto_depIdxs = []int32{
- 5, // 0: google.spanner.admin.database.v1.OperationProgress.start_time:type_name -> google.protobuf.Timestamp
- 5, // 1: google.spanner.admin.database.v1.OperationProgress.end_time:type_name -> google.protobuf.Timestamp
- 1, // 2: google.spanner.admin.database.v1.EncryptionInfo.encryption_type:type_name -> google.spanner.admin.database.v1.EncryptionInfo.Type
- 6, // 3: google.spanner.admin.database.v1.EncryptionInfo.encryption_status:type_name -> google.rpc.Status
- 4, // [4:4] is the sub-list for method output_type
- 4, // [4:4] is the sub-list for method input_type
- 4, // [4:4] is the sub-list for extension type_name
- 4, // [4:4] is the sub-list for extension extendee
- 0, // [0:4] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_admin_database_v1_common_proto_init() }
-func file_google_spanner_admin_database_v1_common_proto_init() {
- if File_google_spanner_admin_database_v1_common_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_admin_database_v1_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*OperationProgress); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_common_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*EncryptionConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_common_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*EncryptionInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_admin_database_v1_common_proto_rawDesc,
- NumEnums: 2,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_admin_database_v1_common_proto_goTypes,
- DependencyIndexes: file_google_spanner_admin_database_v1_common_proto_depIdxs,
- EnumInfos: file_google_spanner_admin_database_v1_common_proto_enumTypes,
- MessageInfos: file_google_spanner_admin_database_v1_common_proto_msgTypes,
- }.Build()
- File_google_spanner_admin_database_v1_common_proto = out.File
- file_google_spanner_admin_database_v1_common_proto_rawDesc = nil
- file_google_spanner_admin_database_v1_common_proto_goTypes = nil
- file_google_spanner_admin_database_v1_common_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go
deleted file mode 100644
index 2be6d9793..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/databasepb/spanner_database_admin.pb.go
+++ /dev/null
@@ -1,4807 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/admin/database/v1/spanner_database_admin.proto
-
-package databasepb
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- iampb "cloud.google.com/go/iam/apiv1/iampb"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Indicates the type of the restore source.
-type RestoreSourceType int32
-
-const (
- // No restore associated.
- RestoreSourceType_TYPE_UNSPECIFIED RestoreSourceType = 0
- // A backup was used as the source of the restore.
- RestoreSourceType_BACKUP RestoreSourceType = 1
-)
-
-// Enum value maps for RestoreSourceType.
-var (
- RestoreSourceType_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "BACKUP",
- }
- RestoreSourceType_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "BACKUP": 1,
- }
-)
-
-func (x RestoreSourceType) Enum() *RestoreSourceType {
- p := new(RestoreSourceType)
- *p = x
- return p
-}
-
-func (x RestoreSourceType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (RestoreSourceType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[0].Descriptor()
-}
-
-func (RestoreSourceType) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[0]
-}
-
-func (x RestoreSourceType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use RestoreSourceType.Descriptor instead.
-func (RestoreSourceType) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{0}
-}
-
-// Indicates the current state of the database.
-type Database_State int32
-
-const (
- // Not specified.
- Database_STATE_UNSPECIFIED Database_State = 0
- // The database is still being created. Operations on the database may fail
- // with `FAILED_PRECONDITION` in this state.
- Database_CREATING Database_State = 1
- // The database is fully created and ready for use.
- Database_READY Database_State = 2
- // The database is fully created and ready for use, but is still
- // being optimized for performance and cannot handle full load.
- //
- // In this state, the database still references the backup
- // it was restore from, preventing the backup
- // from being deleted. When optimizations are complete, the full performance
- // of the database will be restored, and the database will transition to
- // `READY` state.
- Database_READY_OPTIMIZING Database_State = 3
-)
-
-// Enum value maps for Database_State.
-var (
- Database_State_name = map[int32]string{
- 0: "STATE_UNSPECIFIED",
- 1: "CREATING",
- 2: "READY",
- 3: "READY_OPTIMIZING",
- }
- Database_State_value = map[string]int32{
- "STATE_UNSPECIFIED": 0,
- "CREATING": 1,
- "READY": 2,
- "READY_OPTIMIZING": 3,
- }
-)
-
-func (x Database_State) Enum() *Database_State {
- p := new(Database_State)
- *p = x
- return p
-}
-
-func (x Database_State) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Database_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[1].Descriptor()
-}
-
-func (Database_State) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[1]
-}
-
-func (x Database_State) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Database_State.Descriptor instead.
-func (Database_State) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{1, 0}
-}
-
-// Encryption types for the database to be restored.
-type RestoreDatabaseEncryptionConfig_EncryptionType int32
-
-const (
- // Unspecified. Do not use.
- RestoreDatabaseEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED RestoreDatabaseEncryptionConfig_EncryptionType = 0
- // This is the default option when
- // [encryption_config][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig]
- // is not specified.
- RestoreDatabaseEncryptionConfig_USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION RestoreDatabaseEncryptionConfig_EncryptionType = 1
- // Use Google default encryption.
- RestoreDatabaseEncryptionConfig_GOOGLE_DEFAULT_ENCRYPTION RestoreDatabaseEncryptionConfig_EncryptionType = 2
- // Use customer managed encryption. If specified, `kms_key_name` must
- // must contain a valid Cloud KMS key.
- RestoreDatabaseEncryptionConfig_CUSTOMER_MANAGED_ENCRYPTION RestoreDatabaseEncryptionConfig_EncryptionType = 3
-)
-
-// Enum value maps for RestoreDatabaseEncryptionConfig_EncryptionType.
-var (
- RestoreDatabaseEncryptionConfig_EncryptionType_name = map[int32]string{
- 0: "ENCRYPTION_TYPE_UNSPECIFIED",
- 1: "USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION",
- 2: "GOOGLE_DEFAULT_ENCRYPTION",
- 3: "CUSTOMER_MANAGED_ENCRYPTION",
- }
- RestoreDatabaseEncryptionConfig_EncryptionType_value = map[string]int32{
- "ENCRYPTION_TYPE_UNSPECIFIED": 0,
- "USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION": 1,
- "GOOGLE_DEFAULT_ENCRYPTION": 2,
- "CUSTOMER_MANAGED_ENCRYPTION": 3,
- }
-)
-
-func (x RestoreDatabaseEncryptionConfig_EncryptionType) Enum() *RestoreDatabaseEncryptionConfig_EncryptionType {
- p := new(RestoreDatabaseEncryptionConfig_EncryptionType)
- *p = x
- return p
-}
-
-func (x RestoreDatabaseEncryptionConfig_EncryptionType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (RestoreDatabaseEncryptionConfig_EncryptionType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[2].Descriptor()
-}
-
-func (RestoreDatabaseEncryptionConfig_EncryptionType) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes[2]
-}
-
-func (x RestoreDatabaseEncryptionConfig_EncryptionType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use RestoreDatabaseEncryptionConfig_EncryptionType.Descriptor instead.
-func (RestoreDatabaseEncryptionConfig_EncryptionType) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{18, 0}
-}
-
-// Information about the database restore.
-type RestoreInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The type of the restore source.
- SourceType RestoreSourceType `protobuf:"varint,1,opt,name=source_type,json=sourceType,proto3,enum=google.spanner.admin.database.v1.RestoreSourceType" json:"source_type,omitempty"`
- // Information about the source used to restore the database.
- //
- // Types that are assignable to SourceInfo:
- //
- // *RestoreInfo_BackupInfo
- SourceInfo isRestoreInfo_SourceInfo `protobuf_oneof:"source_info"`
-}
-
-func (x *RestoreInfo) Reset() {
- *x = RestoreInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreInfo) ProtoMessage() {}
-
-func (x *RestoreInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreInfo.ProtoReflect.Descriptor instead.
-func (*RestoreInfo) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *RestoreInfo) GetSourceType() RestoreSourceType {
- if x != nil {
- return x.SourceType
- }
- return RestoreSourceType_TYPE_UNSPECIFIED
-}
-
-func (m *RestoreInfo) GetSourceInfo() isRestoreInfo_SourceInfo {
- if m != nil {
- return m.SourceInfo
- }
- return nil
-}
-
-func (x *RestoreInfo) GetBackupInfo() *BackupInfo {
- if x, ok := x.GetSourceInfo().(*RestoreInfo_BackupInfo); ok {
- return x.BackupInfo
- }
- return nil
-}
-
-type isRestoreInfo_SourceInfo interface {
- isRestoreInfo_SourceInfo()
-}
-
-type RestoreInfo_BackupInfo struct {
- // Information about the backup used to restore the database. The backup
- // may no longer exist.
- BackupInfo *BackupInfo `protobuf:"bytes,2,opt,name=backup_info,json=backupInfo,proto3,oneof"`
-}
-
-func (*RestoreInfo_BackupInfo) isRestoreInfo_SourceInfo() {}
-
-// A Cloud Spanner database.
-type Database struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the database. Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>`,
- // where `<database>` is as specified in the `CREATE DATABASE`
- // statement. This name can be passed to other API methods to
- // identify the database.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Output only. The current database state.
- State Database_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.spanner.admin.database.v1.Database_State" json:"state,omitempty"`
- // Output only. If exists, the time at which the database creation started.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. Applicable only for restored databases. Contains information
- // about the restore source.
- RestoreInfo *RestoreInfo `protobuf:"bytes,4,opt,name=restore_info,json=restoreInfo,proto3" json:"restore_info,omitempty"`
- // Output only. For databases that are using customer managed encryption, this
- // field contains the encryption configuration for the database.
- // For databases that are using Google default or other types of encryption,
- // this field is empty.
- EncryptionConfig *EncryptionConfig `protobuf:"bytes,5,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
- // Output only. For databases that are using customer managed encryption, this
- // field contains the encryption information for the database, such as
- // all Cloud KMS key versions that are in use. The `encryption_status' field
- // inside of each `EncryptionInfo` is not populated.
- //
- // For databases that are using Google default or other types of encryption,
- // this field is empty.
- //
- // This field is propagated lazily from the backend. There might be a delay
- // from when a key version is being used and when it appears in this field.
- EncryptionInfo []*EncryptionInfo `protobuf:"bytes,8,rep,name=encryption_info,json=encryptionInfo,proto3" json:"encryption_info,omitempty"`
- // Output only. The period in which Cloud Spanner retains all versions of data
- // for the database. This is the same as the value of version_retention_period
- // database option set using
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
- // Defaults to 1 hour, if not set.
- VersionRetentionPeriod string `protobuf:"bytes,6,opt,name=version_retention_period,json=versionRetentionPeriod,proto3" json:"version_retention_period,omitempty"`
- // Output only. Earliest timestamp at which older versions of the data can be
- // read. This value is continuously updated by Cloud Spanner and becomes stale
- // the moment it is queried. If you are using this value to recover data, make
- // sure to account for the time from the moment when the value is queried to
- // the moment when you initiate the recovery.
- EarliestVersionTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=earliest_version_time,json=earliestVersionTime,proto3" json:"earliest_version_time,omitempty"`
- // Output only. The read-write region which contains the database's leader
- // replicas.
- //
- // This is the same as the value of default_leader
- // database option set using DatabaseAdmin.CreateDatabase or
- // DatabaseAdmin.UpdateDatabaseDdl. If not explicitly set, this is empty.
- DefaultLeader string `protobuf:"bytes,9,opt,name=default_leader,json=defaultLeader,proto3" json:"default_leader,omitempty"`
- // Output only. The dialect of the Cloud Spanner Database.
- DatabaseDialect DatabaseDialect `protobuf:"varint,10,opt,name=database_dialect,json=databaseDialect,proto3,enum=google.spanner.admin.database.v1.DatabaseDialect" json:"database_dialect,omitempty"`
- // Whether drop protection is enabled for this database. Defaults to false,
- // if not set. For more details, please see how to [prevent accidental
- // database
- // deletion](https://cloud.google.com/spanner/docs/prevent-database-deletion).
- EnableDropProtection bool `protobuf:"varint,11,opt,name=enable_drop_protection,json=enableDropProtection,proto3" json:"enable_drop_protection,omitempty"`
- // Output only. If true, the database is being updated. If false, there are no
- // ongoing update operations for the database.
- Reconciling bool `protobuf:"varint,12,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
-}
-
-func (x *Database) Reset() {
- *x = Database{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Database) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Database) ProtoMessage() {}
-
-func (x *Database) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Database.ProtoReflect.Descriptor instead.
-func (*Database) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Database) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Database) GetState() Database_State {
- if x != nil {
- return x.State
- }
- return Database_STATE_UNSPECIFIED
-}
-
-func (x *Database) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *Database) GetRestoreInfo() *RestoreInfo {
- if x != nil {
- return x.RestoreInfo
- }
- return nil
-}
-
-func (x *Database) GetEncryptionConfig() *EncryptionConfig {
- if x != nil {
- return x.EncryptionConfig
- }
- return nil
-}
-
-func (x *Database) GetEncryptionInfo() []*EncryptionInfo {
- if x != nil {
- return x.EncryptionInfo
- }
- return nil
-}
-
-func (x *Database) GetVersionRetentionPeriod() string {
- if x != nil {
- return x.VersionRetentionPeriod
- }
- return ""
-}
-
-func (x *Database) GetEarliestVersionTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EarliestVersionTime
- }
- return nil
-}
-
-func (x *Database) GetDefaultLeader() string {
- if x != nil {
- return x.DefaultLeader
- }
- return ""
-}
-
-func (x *Database) GetDatabaseDialect() DatabaseDialect {
- if x != nil {
- return x.DatabaseDialect
- }
- return DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED
-}
-
-func (x *Database) GetEnableDropProtection() bool {
- if x != nil {
- return x.EnableDropProtection
- }
- return false
-}
-
-func (x *Database) GetReconciling() bool {
- if x != nil {
- return x.Reconciling
- }
- return false
-}
-
-// The request for
-// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
-type ListDatabasesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance whose databases should be listed.
- // Values are of the form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Number of databases to be returned in the response. If 0 or less,
- // defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token]
- // from a previous
- // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse].
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListDatabasesRequest) Reset() {
- *x = ListDatabasesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListDatabasesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListDatabasesRequest) ProtoMessage() {}
-
-func (x *ListDatabasesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListDatabasesRequest.ProtoReflect.Descriptor instead.
-func (*ListDatabasesRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ListDatabasesRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListDatabasesRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListDatabasesRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases].
-type ListDatabasesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Databases that matched the request.
- Databases []*Database `protobuf:"bytes,1,rep,name=databases,proto3" json:"databases,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]
- // call to fetch more of the matching databases.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListDatabasesResponse) Reset() {
- *x = ListDatabasesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListDatabasesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListDatabasesResponse) ProtoMessage() {}
-
-func (x *ListDatabasesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListDatabasesResponse.ProtoReflect.Descriptor instead.
-func (*ListDatabasesResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ListDatabasesResponse) GetDatabases() []*Database {
- if x != nil {
- return x.Databases
- }
- return nil
-}
-
-func (x *ListDatabasesResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for
-// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
-type CreateDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance that will serve the new database.
- // Values are of the form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. A `CREATE DATABASE` statement, which specifies the ID of the
- // new database. The database ID must conform to the regular expression
- // `[a-z][a-z0-9_\-]*[a-z0-9]` and be between 2 and 30 characters in length.
- // If the database ID is a reserved word or if it contains a hyphen, the
- // database ID must be enclosed in backticks (“ ` “).
- CreateStatement string `protobuf:"bytes,2,opt,name=create_statement,json=createStatement,proto3" json:"create_statement,omitempty"`
- // Optional. A list of DDL statements to run inside the newly created
- // database. Statements can create tables, indexes, etc. These
- // statements execute atomically with the creation of the database:
- // if there is an error in any statement, the database is not created.
- ExtraStatements []string `protobuf:"bytes,3,rep,name=extra_statements,json=extraStatements,proto3" json:"extra_statements,omitempty"`
- // Optional. The encryption configuration for the database. If this field is
- // not specified, Cloud Spanner will encrypt/decrypt all data at rest using
- // Google default encryption.
- EncryptionConfig *EncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
- // Optional. The dialect of the Cloud Spanner Database.
- DatabaseDialect DatabaseDialect `protobuf:"varint,5,opt,name=database_dialect,json=databaseDialect,proto3,enum=google.spanner.admin.database.v1.DatabaseDialect" json:"database_dialect,omitempty"`
- // Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements in
- // 'extra_statements' above.
- // Contains a protobuf-serialized
- // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
- // To generate it, [install](https://grpc.io/docs/protoc-installation/) and
- // run `protoc` with --include_imports and --descriptor_set_out. For example,
- // to generate for moon/shot/app.proto, run
- // ```
- //
- // $protoc --proto_path=/app_path --proto_path=/lib_path \
- // --include_imports \
- // --descriptor_set_out=descriptors.data \
- // moon/shot/app.proto
- //
- // ```
- // For more details, see protobuffer [self
- // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
- ProtoDescriptors []byte `protobuf:"bytes,6,opt,name=proto_descriptors,json=protoDescriptors,proto3" json:"proto_descriptors,omitempty"`
-}
-
-func (x *CreateDatabaseRequest) Reset() {
- *x = CreateDatabaseRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateDatabaseRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateDatabaseRequest) ProtoMessage() {}
-
-func (x *CreateDatabaseRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateDatabaseRequest.ProtoReflect.Descriptor instead.
-func (*CreateDatabaseRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *CreateDatabaseRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateDatabaseRequest) GetCreateStatement() string {
- if x != nil {
- return x.CreateStatement
- }
- return ""
-}
-
-func (x *CreateDatabaseRequest) GetExtraStatements() []string {
- if x != nil {
- return x.ExtraStatements
- }
- return nil
-}
-
-func (x *CreateDatabaseRequest) GetEncryptionConfig() *EncryptionConfig {
- if x != nil {
- return x.EncryptionConfig
- }
- return nil
-}
-
-func (x *CreateDatabaseRequest) GetDatabaseDialect() DatabaseDialect {
- if x != nil {
- return x.DatabaseDialect
- }
- return DatabaseDialect_DATABASE_DIALECT_UNSPECIFIED
-}
-
-func (x *CreateDatabaseRequest) GetProtoDescriptors() []byte {
- if x != nil {
- return x.ProtoDescriptors
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase].
-type CreateDatabaseMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The database being created.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
-}
-
-func (x *CreateDatabaseMetadata) Reset() {
- *x = CreateDatabaseMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateDatabaseMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateDatabaseMetadata) ProtoMessage() {}
-
-func (x *CreateDatabaseMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateDatabaseMetadata.ProtoReflect.Descriptor instead.
-func (*CreateDatabaseMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *CreateDatabaseMetadata) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-// The request for
-// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase].
-type GetDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the requested database. Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetDatabaseRequest) Reset() {
- *x = GetDatabaseRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetDatabaseRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetDatabaseRequest) ProtoMessage() {}
-
-func (x *GetDatabaseRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetDatabaseRequest.ProtoReflect.Descriptor instead.
-func (*GetDatabaseRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *GetDatabaseRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
-type UpdateDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database to update.
- // The `name` field of the database is of the form
- // `projects/<project>/instances/<instance>/databases/<database>`.
- Database *Database `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
- // Required. The list of fields to update. Currently, only
- // `enable_drop_protection` field can be updated.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateDatabaseRequest) Reset() {
- *x = UpdateDatabaseRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateDatabaseRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateDatabaseRequest) ProtoMessage() {}
-
-func (x *UpdateDatabaseRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateDatabaseRequest.ProtoReflect.Descriptor instead.
-func (*UpdateDatabaseRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *UpdateDatabaseRequest) GetDatabase() *Database {
- if x != nil {
- return x.Database
- }
- return nil
-}
-
-func (x *UpdateDatabaseRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.UpdateMask
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
-type UpdateDatabaseMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The request for
- // [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase].
- Request *UpdateDatabaseRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"`
- // The progress of the
- // [UpdateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase]
- // operation.
- Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which this operation was cancelled. If set, this operation is
- // in the process of undoing itself (which is best-effort).
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
-}
-
-func (x *UpdateDatabaseMetadata) Reset() {
- *x = UpdateDatabaseMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateDatabaseMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateDatabaseMetadata) ProtoMessage() {}
-
-func (x *UpdateDatabaseMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateDatabaseMetadata.ProtoReflect.Descriptor instead.
-func (*UpdateDatabaseMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *UpdateDatabaseMetadata) GetRequest() *UpdateDatabaseRequest {
- if x != nil {
- return x.Request
- }
- return nil
-}
-
-func (x *UpdateDatabaseMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *UpdateDatabaseMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-// Enqueues the given DDL statements to be applied, in order but not
-// necessarily all at once, to the database schema at some point (or
-// points) in the future. The server checks that the statements
-// are executable (syntactically valid, name tables that exist, etc.)
-// before enqueueing them, but they may still fail upon
-// later execution (e.g., if a statement from another batch of
-// statements is applied first and it conflicts in some way, or if
-// there is some data-related problem like a `NULL` value in a column to
-// which `NOT NULL` would be added). If a statement fails, all
-// subsequent statements in the batch are automatically cancelled.
-//
-// Each batch of statements is assigned a name which can be used with
-// the [Operations][google.longrunning.Operations] API to monitor
-// progress. See the
-// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id]
-// field for more details.
-type UpdateDatabaseDdlRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database to update.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
- // Required. DDL statements to be applied to the database.
- Statements []string `protobuf:"bytes,2,rep,name=statements,proto3" json:"statements,omitempty"`
- // If empty, the new update request is assigned an
- // automatically-generated operation ID. Otherwise, `operation_id`
- // is used to construct the name of the resulting
- // [Operation][google.longrunning.Operation].
- //
- // Specifying an explicit operation ID simplifies determining
- // whether the statements were executed in the event that the
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- // call is replayed, or the return value is otherwise lost: the
- // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database]
- // and `operation_id` fields can be combined to form the
- // [name][google.longrunning.Operation.name] of the resulting
- // [longrunning.Operation][google.longrunning.Operation]:
- // `<database>/operations/<operation_id>`.
- //
- // `operation_id` should be unique within the database, and must be
- // a valid identifier: `[a-z][a-z0-9_]*`. Note that
- // automatically-generated operation IDs always begin with an
- // underscore. If the named operation already exists,
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- // returns `ALREADY_EXISTS`.
- OperationId string `protobuf:"bytes,3,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
- // Optional. Proto descriptors used by CREATE/ALTER PROTO BUNDLE statements.
- // Contains a protobuf-serialized
- // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
- // To generate it, [install](https://grpc.io/docs/protoc-installation/) and
- // run `protoc` with --include_imports and --descriptor_set_out. For example,
- // to generate for moon/shot/app.proto, run
- // ```
- //
- // $protoc --proto_path=/app_path --proto_path=/lib_path \
- // --include_imports \
- // --descriptor_set_out=descriptors.data \
- // moon/shot/app.proto
- //
- // ```
- // For more details, see protobuffer [self
- // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
- ProtoDescriptors []byte `protobuf:"bytes,4,opt,name=proto_descriptors,json=protoDescriptors,proto3" json:"proto_descriptors,omitempty"`
-}
-
-func (x *UpdateDatabaseDdlRequest) Reset() {
- *x = UpdateDatabaseDdlRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateDatabaseDdlRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateDatabaseDdlRequest) ProtoMessage() {}
-
-func (x *UpdateDatabaseDdlRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateDatabaseDdlRequest.ProtoReflect.Descriptor instead.
-func (*UpdateDatabaseDdlRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *UpdateDatabaseDdlRequest) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *UpdateDatabaseDdlRequest) GetStatements() []string {
- if x != nil {
- return x.Statements
- }
- return nil
-}
-
-func (x *UpdateDatabaseDdlRequest) GetOperationId() string {
- if x != nil {
- return x.OperationId
- }
- return ""
-}
-
-func (x *UpdateDatabaseDdlRequest) GetProtoDescriptors() []byte {
- if x != nil {
- return x.ProtoDescriptors
- }
- return nil
-}
-
-// Action information extracted from a DDL statement. This proto is used to
-// display the brief info of the DDL statement for the operation
-// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
-type DdlStatementActionInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The action for the DDL statement, e.g. CREATE, ALTER, DROP, GRANT, etc.
- // This field is a non-empty string.
- Action string `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
- // The entity type for the DDL statement, e.g. TABLE, INDEX, VIEW, etc.
- // This field can be empty string for some DDL statement,
- // e.g. for statement "ANALYZE", `entity_type` = "".
- EntityType string `protobuf:"bytes,2,opt,name=entity_type,json=entityType,proto3" json:"entity_type,omitempty"`
- // The entity name(s) being operated on the DDL statement.
- // E.g.
- // 1. For statement "CREATE TABLE t1(...)", `entity_names` = ["t1"].
- // 2. For statement "GRANT ROLE r1, r2 ...", `entity_names` = ["r1", "r2"].
- // 3. For statement "ANALYZE", `entity_names` = [].
- EntityNames []string `protobuf:"bytes,3,rep,name=entity_names,json=entityNames,proto3" json:"entity_names,omitempty"`
-}
-
-func (x *DdlStatementActionInfo) Reset() {
- *x = DdlStatementActionInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DdlStatementActionInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DdlStatementActionInfo) ProtoMessage() {}
-
-func (x *DdlStatementActionInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DdlStatementActionInfo.ProtoReflect.Descriptor instead.
-func (*DdlStatementActionInfo) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *DdlStatementActionInfo) GetAction() string {
- if x != nil {
- return x.Action
- }
- return ""
-}
-
-func (x *DdlStatementActionInfo) GetEntityType() string {
- if x != nil {
- return x.EntityType
- }
- return ""
-}
-
-func (x *DdlStatementActionInfo) GetEntityNames() []string {
- if x != nil {
- return x.EntityNames
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl].
-type UpdateDatabaseDdlMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The database being modified.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
- // For an update this list contains all the statements. For an
- // individual statement, this list contains only that statement.
- Statements []string `protobuf:"bytes,2,rep,name=statements,proto3" json:"statements,omitempty"`
- // Reports the commit timestamps of all statements that have
- // succeeded so far, where `commit_timestamps[i]` is the commit
- // timestamp for the statement `statements[i]`.
- CommitTimestamps []*timestamppb.Timestamp `protobuf:"bytes,3,rep,name=commit_timestamps,json=commitTimestamps,proto3" json:"commit_timestamps,omitempty"`
- // Output only. When true, indicates that the operation is throttled e.g.
- // due to resource constraints. When resources become available the operation
- // will resume and this field will be false again.
- Throttled bool `protobuf:"varint,4,opt,name=throttled,proto3" json:"throttled,omitempty"`
- // The progress of the
- // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl]
- // operations. All DDL statements will have continuously updating progress,
- // and `progress[i]` is the operation progress for `statements[i]`. Also,
- // `progress[i]` will have start time and end time populated with commit
- // timestamp of operation, as well as a progress of 100% once the operation
- // has completed.
- Progress []*OperationProgress `protobuf:"bytes,5,rep,name=progress,proto3" json:"progress,omitempty"`
- // The brief action info for the DDL statements.
- // `actions[i]` is the brief info for `statements[i]`.
- Actions []*DdlStatementActionInfo `protobuf:"bytes,6,rep,name=actions,proto3" json:"actions,omitempty"`
-}
-
-func (x *UpdateDatabaseDdlMetadata) Reset() {
- *x = UpdateDatabaseDdlMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateDatabaseDdlMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateDatabaseDdlMetadata) ProtoMessage() {}
-
-func (x *UpdateDatabaseDdlMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateDatabaseDdlMetadata.ProtoReflect.Descriptor instead.
-func (*UpdateDatabaseDdlMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *UpdateDatabaseDdlMetadata) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *UpdateDatabaseDdlMetadata) GetStatements() []string {
- if x != nil {
- return x.Statements
- }
- return nil
-}
-
-func (x *UpdateDatabaseDdlMetadata) GetCommitTimestamps() []*timestamppb.Timestamp {
- if x != nil {
- return x.CommitTimestamps
- }
- return nil
-}
-
-func (x *UpdateDatabaseDdlMetadata) GetThrottled() bool {
- if x != nil {
- return x.Throttled
- }
- return false
-}
-
-func (x *UpdateDatabaseDdlMetadata) GetProgress() []*OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *UpdateDatabaseDdlMetadata) GetActions() []*DdlStatementActionInfo {
- if x != nil {
- return x.Actions
- }
- return nil
-}
-
-// The request for
-// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase].
-type DropDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database to be dropped.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
-}
-
-func (x *DropDatabaseRequest) Reset() {
- *x = DropDatabaseRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DropDatabaseRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DropDatabaseRequest) ProtoMessage() {}
-
-func (x *DropDatabaseRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DropDatabaseRequest.ProtoReflect.Descriptor instead.
-func (*DropDatabaseRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *DropDatabaseRequest) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-// The request for
-// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
-type GetDatabaseDdlRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database whose schema we wish to get.
- // Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>`
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
-}
-
-func (x *GetDatabaseDdlRequest) Reset() {
- *x = GetDatabaseDdlRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetDatabaseDdlRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetDatabaseDdlRequest) ProtoMessage() {}
-
-func (x *GetDatabaseDdlRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetDatabaseDdlRequest.ProtoReflect.Descriptor instead.
-func (*GetDatabaseDdlRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *GetDatabaseDdlRequest) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-// The response for
-// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl].
-type GetDatabaseDdlResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A list of formatted DDL statements defining the schema of the database
- // specified in the request.
- Statements []string `protobuf:"bytes,1,rep,name=statements,proto3" json:"statements,omitempty"`
- // Proto descriptors stored in the database.
- // Contains a protobuf-serialized
- // [google.protobuf.FileDescriptorSet](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/descriptor.proto).
- // For more details, see protobuffer [self
- // description](https://developers.google.com/protocol-buffers/docs/techniques#self-description).
- ProtoDescriptors []byte `protobuf:"bytes,2,opt,name=proto_descriptors,json=protoDescriptors,proto3" json:"proto_descriptors,omitempty"`
-}
-
-func (x *GetDatabaseDdlResponse) Reset() {
- *x = GetDatabaseDdlResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetDatabaseDdlResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetDatabaseDdlResponse) ProtoMessage() {}
-
-func (x *GetDatabaseDdlResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetDatabaseDdlResponse.ProtoReflect.Descriptor instead.
-func (*GetDatabaseDdlResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *GetDatabaseDdlResponse) GetStatements() []string {
- if x != nil {
- return x.Statements
- }
- return nil
-}
-
-func (x *GetDatabaseDdlResponse) GetProtoDescriptors() []byte {
- if x != nil {
- return x.ProtoDescriptors
- }
- return nil
-}
-
-// The request for
-// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
-type ListDatabaseOperationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance of the database operations.
- // Values are of the form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // An expression that filters the list of returned operations.
- //
- // A filter expression consists of a field name, a
- // comparison operator, and a value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the [Operation][google.longrunning.Operation]
- // are eligible for filtering:
- //
- // - `name` - The name of the long-running operation
- // - `done` - False if the operation is in progress, else true.
- // - `metadata.@type` - the type of metadata. For example, the type string
- // for
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]
- // is
- // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`.
- // - `metadata.<field_name>` - any field in metadata.value.
- // `metadata.@type` must be specified first, if filtering on metadata
- // fields.
- // - `error` - Error associated with the long-running operation.
- // - `response.@type` - the type of response.
- // - `response.<field_name>` - any field in response.value.
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic. However,
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // - `done:true` - The operation is complete.
- // - `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND` \
- // `(metadata.source_type:BACKUP) AND` \
- // `(metadata.backup_info.backup:backup_howl) AND` \
- // `(metadata.name:restored_howl) AND` \
- // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND` \
- // `(error:*)` - Return operations where:
- // - The operation's metadata type is
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- // - The database is restored from a backup.
- // - The backup name contains "backup_howl".
- // - The restored database's name contains "restored_howl".
- // - The operation started before 2018-03-28T14:50:00Z.
- // - The operation resulted in an error.
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
- // Number of operations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token]
- // from a previous
- // [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse]
- // to the same `parent` and with the same `filter`.
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListDatabaseOperationsRequest) Reset() {
- *x = ListDatabaseOperationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListDatabaseOperationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListDatabaseOperationsRequest) ProtoMessage() {}
-
-func (x *ListDatabaseOperationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListDatabaseOperationsRequest.ProtoReflect.Descriptor instead.
-func (*ListDatabaseOperationsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *ListDatabaseOperationsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListDatabaseOperationsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListDatabaseOperationsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListDatabaseOperationsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations].
-type ListDatabaseOperationsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of matching database [long-running
- // operations][google.longrunning.Operation]. Each operation's name will be
- // prefixed by the database's name. The operation's
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata.
- Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]
- // call to fetch more of the matching metadata.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListDatabaseOperationsResponse) Reset() {
- *x = ListDatabaseOperationsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListDatabaseOperationsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListDatabaseOperationsResponse) ProtoMessage() {}
-
-func (x *ListDatabaseOperationsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListDatabaseOperationsResponse.ProtoReflect.Descriptor instead.
-func (*ListDatabaseOperationsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *ListDatabaseOperationsResponse) GetOperations() []*longrunningpb.Operation {
- if x != nil {
- return x.Operations
- }
- return nil
-}
-
-func (x *ListDatabaseOperationsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for
-// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
-type RestoreDatabaseRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance in which to create the
- // restored database. This instance must be in the same project and
- // have the same instance configuration as the instance containing
- // the source backup. Values are of the form
- // `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The id of the database to create and restore to. This
- // database must not already exist. The `database_id` appended to
- // `parent` forms the full database name of the form
- // `projects/<project>/instances/<instance>/databases/<database_id>`.
- DatabaseId string `protobuf:"bytes,2,opt,name=database_id,json=databaseId,proto3" json:"database_id,omitempty"`
- // Required. The source from which to restore.
- //
- // Types that are assignable to Source:
- //
- // *RestoreDatabaseRequest_Backup
- Source isRestoreDatabaseRequest_Source `protobuf_oneof:"source"`
- // Optional. An encryption configuration describing the encryption type and
- // key resources in Cloud KMS used to encrypt/decrypt the database to restore
- // to. If this field is not specified, the restored database will use the same
- // encryption configuration as the backup by default, namely
- // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
- // = `USE_CONFIG_DEFAULT_OR_BACKUP_ENCRYPTION`.
- EncryptionConfig *RestoreDatabaseEncryptionConfig `protobuf:"bytes,4,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
-}
-
-func (x *RestoreDatabaseRequest) Reset() {
- *x = RestoreDatabaseRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreDatabaseRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreDatabaseRequest) ProtoMessage() {}
-
-func (x *RestoreDatabaseRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreDatabaseRequest.ProtoReflect.Descriptor instead.
-func (*RestoreDatabaseRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *RestoreDatabaseRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *RestoreDatabaseRequest) GetDatabaseId() string {
- if x != nil {
- return x.DatabaseId
- }
- return ""
-}
-
-func (m *RestoreDatabaseRequest) GetSource() isRestoreDatabaseRequest_Source {
- if m != nil {
- return m.Source
- }
- return nil
-}
-
-func (x *RestoreDatabaseRequest) GetBackup() string {
- if x, ok := x.GetSource().(*RestoreDatabaseRequest_Backup); ok {
- return x.Backup
- }
- return ""
-}
-
-func (x *RestoreDatabaseRequest) GetEncryptionConfig() *RestoreDatabaseEncryptionConfig {
- if x != nil {
- return x.EncryptionConfig
- }
- return nil
-}
-
-type isRestoreDatabaseRequest_Source interface {
- isRestoreDatabaseRequest_Source()
-}
-
-type RestoreDatabaseRequest_Backup struct {
- // Name of the backup from which to restore. Values are of the form
- // `projects/<project>/instances/<instance>/backups/<backup>`.
- Backup string `protobuf:"bytes,3,opt,name=backup,proto3,oneof"`
-}
-
-func (*RestoreDatabaseRequest_Backup) isRestoreDatabaseRequest_Source() {}
-
-// Encryption configuration for the restored database.
-type RestoreDatabaseEncryptionConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The encryption type of the restored database.
- EncryptionType RestoreDatabaseEncryptionConfig_EncryptionType `protobuf:"varint,1,opt,name=encryption_type,json=encryptionType,proto3,enum=google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig_EncryptionType" json:"encryption_type,omitempty"`
- // Optional. The Cloud KMS key that will be used to encrypt/decrypt the
- // restored database. This field should be set only when
- // [encryption_type][google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type]
- // is `CUSTOMER_MANAGED_ENCRYPTION`. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- KmsKeyName string `protobuf:"bytes,2,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"`
- // Optional. Specifies the KMS configuration for the one or more keys used to
- // encrypt the database. Values are of the form
- // `projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<kms_key_name>`.
- //
- // The keys referenced by kms_key_names must fully cover all
- // regions of the database instance configuration. Some examples:
- // * For single region database instance configs, specify a single regional
- // location KMS key.
- // * For multi-regional database instance configs of type GOOGLE_MANAGED,
- // either specify a multi-regional location KMS key or multiple regional
- // location KMS keys that cover all regions in the instance config.
- // * For a database instance config of type USER_MANAGED, please specify only
- // regional location KMS keys to cover each region in the instance config.
- // Multi-regional location KMS keys are not supported for USER_MANAGED
- // instance configs.
- KmsKeyNames []string `protobuf:"bytes,3,rep,name=kms_key_names,json=kmsKeyNames,proto3" json:"kms_key_names,omitempty"`
-}
-
-func (x *RestoreDatabaseEncryptionConfig) Reset() {
- *x = RestoreDatabaseEncryptionConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreDatabaseEncryptionConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreDatabaseEncryptionConfig) ProtoMessage() {}
-
-func (x *RestoreDatabaseEncryptionConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreDatabaseEncryptionConfig.ProtoReflect.Descriptor instead.
-func (*RestoreDatabaseEncryptionConfig) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *RestoreDatabaseEncryptionConfig) GetEncryptionType() RestoreDatabaseEncryptionConfig_EncryptionType {
- if x != nil {
- return x.EncryptionType
- }
- return RestoreDatabaseEncryptionConfig_ENCRYPTION_TYPE_UNSPECIFIED
-}
-
-func (x *RestoreDatabaseEncryptionConfig) GetKmsKeyName() string {
- if x != nil {
- return x.KmsKeyName
- }
- return ""
-}
-
-func (x *RestoreDatabaseEncryptionConfig) GetKmsKeyNames() []string {
- if x != nil {
- return x.KmsKeyNames
- }
- return nil
-}
-
-// Metadata type for the long-running operation returned by
-// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase].
-type RestoreDatabaseMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Name of the database being created and restored to.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The type of the restore source.
- SourceType RestoreSourceType `protobuf:"varint,2,opt,name=source_type,json=sourceType,proto3,enum=google.spanner.admin.database.v1.RestoreSourceType" json:"source_type,omitempty"`
- // Information about the source used to restore the database, as specified by
- // `source` in
- // [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest].
- //
- // Types that are assignable to SourceInfo:
- //
- // *RestoreDatabaseMetadata_BackupInfo
- SourceInfo isRestoreDatabaseMetadata_SourceInfo `protobuf_oneof:"source_info"`
- // The progress of the
- // [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]
- // operation.
- Progress *OperationProgress `protobuf:"bytes,4,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which cancellation of this operation was received.
- // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]
- // starts asynchronous cancellation on a long-running operation. The server
- // makes a best effort to cancel the operation, but success is not guaranteed.
- // Clients can use
- // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
- // other methods to check whether the cancellation succeeded or whether the
- // operation completed despite cancellation. On successful cancellation,
- // the operation is not deleted; instead, it becomes an operation with
- // an [Operation.error][google.longrunning.Operation.error] value with a
- // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to
- // `Code.CANCELLED`.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
- // If exists, the name of the long-running operation that will be used to
- // track the post-restore optimization process to optimize the performance of
- // the restored database, and remove the dependency on the restore source.
- // The name is of the form
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`
- // where the <database> is the name of database being created and restored to.
- // The metadata type of the long-running operation is
- // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata].
- // This long-running operation will be automatically created by the system
- // after the RestoreDatabase long-running operation completes successfully.
- // This operation will not be created if the restore was not successful.
- OptimizeDatabaseOperationName string `protobuf:"bytes,6,opt,name=optimize_database_operation_name,json=optimizeDatabaseOperationName,proto3" json:"optimize_database_operation_name,omitempty"`
-}
-
-func (x *RestoreDatabaseMetadata) Reset() {
- *x = RestoreDatabaseMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RestoreDatabaseMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RestoreDatabaseMetadata) ProtoMessage() {}
-
-func (x *RestoreDatabaseMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RestoreDatabaseMetadata.ProtoReflect.Descriptor instead.
-func (*RestoreDatabaseMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *RestoreDatabaseMetadata) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *RestoreDatabaseMetadata) GetSourceType() RestoreSourceType {
- if x != nil {
- return x.SourceType
- }
- return RestoreSourceType_TYPE_UNSPECIFIED
-}
-
-func (m *RestoreDatabaseMetadata) GetSourceInfo() isRestoreDatabaseMetadata_SourceInfo {
- if m != nil {
- return m.SourceInfo
- }
- return nil
-}
-
-func (x *RestoreDatabaseMetadata) GetBackupInfo() *BackupInfo {
- if x, ok := x.GetSourceInfo().(*RestoreDatabaseMetadata_BackupInfo); ok {
- return x.BackupInfo
- }
- return nil
-}
-
-func (x *RestoreDatabaseMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *RestoreDatabaseMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-func (x *RestoreDatabaseMetadata) GetOptimizeDatabaseOperationName() string {
- if x != nil {
- return x.OptimizeDatabaseOperationName
- }
- return ""
-}
-
-type isRestoreDatabaseMetadata_SourceInfo interface {
- isRestoreDatabaseMetadata_SourceInfo()
-}
-
-type RestoreDatabaseMetadata_BackupInfo struct {
- // Information about the backup used to restore the database.
- BackupInfo *BackupInfo `protobuf:"bytes,3,opt,name=backup_info,json=backupInfo,proto3,oneof"`
-}
-
-func (*RestoreDatabaseMetadata_BackupInfo) isRestoreDatabaseMetadata_SourceInfo() {}
-
-// Metadata type for the long-running operation used to track the progress
-// of optimizations performed on a newly restored database. This long-running
-// operation is automatically created by the system after the successful
-// completion of a database restore, and cannot be cancelled.
-type OptimizeRestoredDatabaseMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Name of the restored database being optimized.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The progress of the post-restore optimizations.
- Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
-}
-
-func (x *OptimizeRestoredDatabaseMetadata) Reset() {
- *x = OptimizeRestoredDatabaseMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OptimizeRestoredDatabaseMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OptimizeRestoredDatabaseMetadata) ProtoMessage() {}
-
-func (x *OptimizeRestoredDatabaseMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OptimizeRestoredDatabaseMetadata.ProtoReflect.Descriptor instead.
-func (*OptimizeRestoredDatabaseMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *OptimizeRestoredDatabaseMetadata) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *OptimizeRestoredDatabaseMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-// A Cloud Spanner database role.
-type DatabaseRole struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the database role. Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>/databaseRoles/<role>`
- // where `<role>` is as specified in the `CREATE ROLE` DDL statement.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DatabaseRole) Reset() {
- *x = DatabaseRole{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DatabaseRole) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DatabaseRole) ProtoMessage() {}
-
-func (x *DatabaseRole) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DatabaseRole.ProtoReflect.Descriptor instead.
-func (*DatabaseRole) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{21}
-}
-
-func (x *DatabaseRole) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
-type ListDatabaseRolesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database whose roles should be listed.
- // Values are of the form
- // `projects/<project>/instances/<instance>/databases/<database>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Number of database roles to be returned in the response. If 0 or less,
- // defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.database.v1.ListDatabaseRolesResponse.next_page_token]
- // from a previous
- // [ListDatabaseRolesResponse][google.spanner.admin.database.v1.ListDatabaseRolesResponse].
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListDatabaseRolesRequest) Reset() {
- *x = ListDatabaseRolesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListDatabaseRolesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListDatabaseRolesRequest) ProtoMessage() {}
-
-func (x *ListDatabaseRolesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListDatabaseRolesRequest.ProtoReflect.Descriptor instead.
-func (*ListDatabaseRolesRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{22}
-}
-
-func (x *ListDatabaseRolesRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListDatabaseRolesRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListDatabaseRolesRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles].
-type ListDatabaseRolesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Database roles that matched the request.
- DatabaseRoles []*DatabaseRole `protobuf:"bytes,1,rep,name=database_roles,json=databaseRoles,proto3" json:"database_roles,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListDatabaseRoles][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles]
- // call to fetch more of the matching roles.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListDatabaseRolesResponse) Reset() {
- *x = ListDatabaseRolesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListDatabaseRolesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListDatabaseRolesResponse) ProtoMessage() {}
-
-func (x *ListDatabaseRolesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListDatabaseRolesResponse.ProtoReflect.Descriptor instead.
-func (*ListDatabaseRolesResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP(), []int{23}
-}
-
-func (x *ListDatabaseRolesResponse) GetDatabaseRoles() []*DatabaseRole {
- if x != nil {
- return x.DatabaseRoles
- }
- return nil
-}
-
-func (x *ListDatabaseRolesResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-var File_google_spanner_admin_database_v1_spanner_database_admin_proto protoreflect.FileDescriptor
-
-var file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc = []byte{
- 0x0a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f,
- 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
- 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
- 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
- 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63,
- 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x01, 0x0a, 0x0b,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x54, 0x0a, 0x0b, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e,
- 0x66, 0x6f, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
- 0x6f, 0x22, 0x82, 0x08, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x17,
- 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72,
- 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0b, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x64, 0x0a,
- 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a, 0x0f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x49,
- 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x18, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x76, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x69,
- 0x6f, 0x64, 0x12, 0x53, 0x0a, 0x15, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x13, 0x65, 0x61, 0x72, 0x6c, 0x69, 0x65, 0x73, 0x74, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61,
- 0x64, 0x65, 0x72, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f,
- 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44,
- 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
- 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x72,
- 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0b,
- 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28,
- 0x08, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c,
- 0x69, 0x6e, 0x67, 0x22, 0x4d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11,
- 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10,
- 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10,
- 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4d, 0x49, 0x5a, 0x49, 0x4e, 0x47,
- 0x10, 0x03, 0x3a, 0x62, 0xea, 0x41, 0x5f, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x3c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x7b, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x22, 0x93, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a,
- 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x89, 0x01, 0x0a,
- 0x15, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
- 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xb3, 0x03, 0x0a, 0x15, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d,
- 0x65, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x61, 0x0a, 0x10, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x64, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44,
- 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0f, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x69, 0x61, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x30, 0x0a, 0x11,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x5a,
- 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a,
- 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0x51, 0x0a, 0x12, 0x47, 0x65,
- 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa6, 0x01,
- 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d,
- 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xf9, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x12, 0x51, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f,
- 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69,
- 0x6d, 0x65, 0x22, 0xd9, 0x01, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x43, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
- 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x11,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x74,
- 0x0a, 0x16, 0x44, 0x64, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x41, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x4e,
- 0x61, 0x6d, 0x65, 0x73, 0x22, 0x8e, 0x03, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x40, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e,
- 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d,
- 0x65, 0x6e, 0x74, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x6f, 0x6d,
- 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x12, 0x21, 0x0a,
- 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64,
- 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73,
- 0x73, 0x12, 0x52, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x64, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x5a, 0x0a, 0x13, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x22, 0x5c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22,
- 0x65, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64,
- 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21,
- 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
- 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x87, 0x01,
- 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x3d, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f,
- 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xbc, 0x02, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x06, 0x62, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x22, 0xfa, 0x41, 0x1f, 0x0a, 0x1d,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, 0x00, 0x52,
- 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x73, 0x0a, 0x11, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0x0a, 0x06,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xde, 0x03, 0x0a, 0x1f, 0x52, 0x65, 0x73, 0x74, 0x6f,
- 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4b, 0x0a, 0x0c, 0x6b, 0x6d,
- 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x29, 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b,
- 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x6b, 0x6d, 0x73,
- 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x6b, 0x6d, 0x73, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x29,
- 0xe0, 0x41, 0x01, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6b, 0x6d, 0x73, 0x4b, 0x65,
- 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x0e, 0x45, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x43,
- 0x52, 0x59, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53,
- 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x2b, 0x0a, 0x27, 0x55, 0x53,
- 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54,
- 0x5f, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59,
- 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x47, 0x4f, 0x4f, 0x47, 0x4c,
- 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50,
- 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
- 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x52, 0x59,
- 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x22, 0xe0, 0x03, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a,
- 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x6e,
- 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f,
- 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x6f, 0x70,
- 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0xad, 0x01, 0x0a, 0x20, 0x4f,
- 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
- 0x38, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa,
- 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f,
- 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
- 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x3a, 0x7b, 0xea, 0x41, 0x78, 0x0a, 0x23, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x51,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x73, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x6c, 0x65,
- 0x7d, 0x22, 0x97, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f,
- 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
- 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9a, 0x01, 0x0a, 0x19,
- 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x0e, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c,
- 0x65, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73,
- 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
- 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x2a, 0x35, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x74,
- 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a,
- 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x41, 0x43, 0x4b, 0x55, 0x50, 0x10, 0x01, 0x32,
- 0x98, 0x31, 0x0a, 0x0d, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69,
- 0x6e, 0x12, 0xc0, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x12, 0xa4, 0x02, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75,
- 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22,
- 0xb9, 0x01, 0xca, 0x41, 0x64, 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x12, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x2c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d,
- 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x3a, 0x01, 0x2a, 0x22, 0x2d, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x0b,
- 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x34, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
- 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x22, 0x3c, 0xda,
- 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xef, 0x01, 0x0a, 0x0e,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x37,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0xca, 0x41, 0x22, 0x0a, 0x08, 0x44, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41,
- 0x14, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x08, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x32, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
- 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x9d, 0x02,
- 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x44, 0x64, 0x6c, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xac,
- 0x01, 0xca, 0x41, 0x53, 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3a, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x13, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x82, 0xd3, 0xe4,
- 0x93, 0x02, 0x3a, 0x3a, 0x01, 0x2a, 0x32, 0x35, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x64, 0x6c, 0x12, 0xa3, 0x01,
- 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x35,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x44, 0xda,
- 0x41, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33,
- 0x2a, 0x31, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
- 0x2f, 0x2a, 0x7d, 0x12, 0xcd, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x44, 0x64,
- 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x08, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
- 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
- 0x64, 0x64, 0x6c, 0x12, 0xc2, 0x02, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61,
- 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22,
- 0xf6, 0x01, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xdd, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x41,
- 0x3a, 0x01, 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x5a, 0x55, 0x3a, 0x01, 0x2a, 0x22, 0x50, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xbb, 0x02, 0x0a, 0x0c, 0x47, 0x65, 0x74,
- 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x22, 0xef, 0x01, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0xdd, 0x01, 0x3a, 0x01, 0x2a, 0x5a, 0x41, 0x3a, 0x01,
- 0x2a, 0x22, 0x3c, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f,
- 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5a,
- 0x55, 0x3a, 0x01, 0x2a, 0x22, 0x50, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x3e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xd4, 0x03, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65,
- 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0xe8, 0x02, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0xca, 0x02, 0x3a, 0x01, 0x2a, 0x5a, 0x47, 0x3a, 0x01, 0x2a, 0x22, 0x42, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f,
- 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73,
- 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x5a,
- 0x5b, 0x3a, 0x01, 0x2a, 0x22, 0x56, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61,
- 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x5a, 0x59, 0x3a, 0x01,
- 0x2a, 0x22, 0x54, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65,
- 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x9f, 0x02,
- 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x35,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c,
- 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb8, 0x01, 0xca, 0x41, 0x60, 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x12, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x22, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12,
- 0xac, 0x02, 0x0a, 0x0a, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x33,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e,
- 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x22, 0xc9, 0x01, 0xca, 0x41, 0x5e, 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x12, 0x33, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x70, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x2a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
- 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x3a, 0x01, 0x2a, 0x22, 0x30, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x63, 0x6f, 0x70, 0x79, 0x12, 0xa5,
- 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x32, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc8, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x57, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x32, 0x32, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a,
- 0x7d, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
- 0x79, 0x22, 0x3a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d,
- 0x2a, 0x2b, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
- 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb8, 0x01,
- 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x34, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75,
- 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x12, 0x2b, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0xb1, 0x02, 0x0a, 0x0f, 0x52, 0x65, 0x73,
- 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x38, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xc4, 0x01, 0xca, 0x41, 0x65, 0x0a, 0x29, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x38, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65,
- 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x3a, 0x3a, 0x01, 0x2a, 0x22, 0x35, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0xe4, 0x01, 0x0a,
- 0x16, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0xda, 0x41, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0xdc, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x45, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x12, 0xdc, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x4e, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x3f, 0x12, 0x3d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
- 0x2f, 0x2a, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52, 0x6f, 0x6c, 0x65,
- 0x73, 0x12, 0x8e, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
- 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
- 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
- 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x84, 0x01, 0xda, 0x41,
- 0x29, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x52,
- 0x3a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
- 0x65, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f,
- 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c,
- 0x65, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70,
- 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x42,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63,
- 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x4e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
- 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12,
- 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
- 0x22, 0x86, 0x01, 0xda, 0x41, 0x1b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
- 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x62, 0x3a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f,
- 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x32, 0x4f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x62,
- 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x6e,
- 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xbd, 0x01, 0x0a, 0x14, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
- 0x6c, 0x65, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b,
- 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4e, 0xda, 0x41, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x2a, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
- 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01, 0x0a, 0x13, 0x4c, 0x69,
- 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
- 0x73, 0x12, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53,
- 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68,
- 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x50,
- 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12,
- 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73,
- 0x1a, 0x78, 0xca, 0x41, 0x16, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x5c, 0x68, 0x74,
- 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74,
- 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0xd8, 0x02, 0xea, 0x41, 0x4a,
- 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f,
- 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x76, 0x31,
- 0x42, 0x19, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x70, 0x62, 0x3b, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02,
- 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x44, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescOnce sync.Once
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData = file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc
-)
-
-func file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescGZIP() []byte {
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescOnce.Do(func() {
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData)
- })
- return file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDescData
-}
-
-var file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 24)
-var file_google_spanner_admin_database_v1_spanner_database_admin_proto_goTypes = []any{
- (RestoreSourceType)(0), // 0: google.spanner.admin.database.v1.RestoreSourceType
- (Database_State)(0), // 1: google.spanner.admin.database.v1.Database.State
- (RestoreDatabaseEncryptionConfig_EncryptionType)(0), // 2: google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType
- (*RestoreInfo)(nil), // 3: google.spanner.admin.database.v1.RestoreInfo
- (*Database)(nil), // 4: google.spanner.admin.database.v1.Database
- (*ListDatabasesRequest)(nil), // 5: google.spanner.admin.database.v1.ListDatabasesRequest
- (*ListDatabasesResponse)(nil), // 6: google.spanner.admin.database.v1.ListDatabasesResponse
- (*CreateDatabaseRequest)(nil), // 7: google.spanner.admin.database.v1.CreateDatabaseRequest
- (*CreateDatabaseMetadata)(nil), // 8: google.spanner.admin.database.v1.CreateDatabaseMetadata
- (*GetDatabaseRequest)(nil), // 9: google.spanner.admin.database.v1.GetDatabaseRequest
- (*UpdateDatabaseRequest)(nil), // 10: google.spanner.admin.database.v1.UpdateDatabaseRequest
- (*UpdateDatabaseMetadata)(nil), // 11: google.spanner.admin.database.v1.UpdateDatabaseMetadata
- (*UpdateDatabaseDdlRequest)(nil), // 12: google.spanner.admin.database.v1.UpdateDatabaseDdlRequest
- (*DdlStatementActionInfo)(nil), // 13: google.spanner.admin.database.v1.DdlStatementActionInfo
- (*UpdateDatabaseDdlMetadata)(nil), // 14: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata
- (*DropDatabaseRequest)(nil), // 15: google.spanner.admin.database.v1.DropDatabaseRequest
- (*GetDatabaseDdlRequest)(nil), // 16: google.spanner.admin.database.v1.GetDatabaseDdlRequest
- (*GetDatabaseDdlResponse)(nil), // 17: google.spanner.admin.database.v1.GetDatabaseDdlResponse
- (*ListDatabaseOperationsRequest)(nil), // 18: google.spanner.admin.database.v1.ListDatabaseOperationsRequest
- (*ListDatabaseOperationsResponse)(nil), // 19: google.spanner.admin.database.v1.ListDatabaseOperationsResponse
- (*RestoreDatabaseRequest)(nil), // 20: google.spanner.admin.database.v1.RestoreDatabaseRequest
- (*RestoreDatabaseEncryptionConfig)(nil), // 21: google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig
- (*RestoreDatabaseMetadata)(nil), // 22: google.spanner.admin.database.v1.RestoreDatabaseMetadata
- (*OptimizeRestoredDatabaseMetadata)(nil), // 23: google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata
- (*DatabaseRole)(nil), // 24: google.spanner.admin.database.v1.DatabaseRole
- (*ListDatabaseRolesRequest)(nil), // 25: google.spanner.admin.database.v1.ListDatabaseRolesRequest
- (*ListDatabaseRolesResponse)(nil), // 26: google.spanner.admin.database.v1.ListDatabaseRolesResponse
- (*BackupInfo)(nil), // 27: google.spanner.admin.database.v1.BackupInfo
- (*timestamppb.Timestamp)(nil), // 28: google.protobuf.Timestamp
- (*EncryptionConfig)(nil), // 29: google.spanner.admin.database.v1.EncryptionConfig
- (*EncryptionInfo)(nil), // 30: google.spanner.admin.database.v1.EncryptionInfo
- (DatabaseDialect)(0), // 31: google.spanner.admin.database.v1.DatabaseDialect
- (*fieldmaskpb.FieldMask)(nil), // 32: google.protobuf.FieldMask
- (*OperationProgress)(nil), // 33: google.spanner.admin.database.v1.OperationProgress
- (*longrunningpb.Operation)(nil), // 34: google.longrunning.Operation
- (*iampb.SetIamPolicyRequest)(nil), // 35: google.iam.v1.SetIamPolicyRequest
- (*iampb.GetIamPolicyRequest)(nil), // 36: google.iam.v1.GetIamPolicyRequest
- (*iampb.TestIamPermissionsRequest)(nil), // 37: google.iam.v1.TestIamPermissionsRequest
- (*CreateBackupRequest)(nil), // 38: google.spanner.admin.database.v1.CreateBackupRequest
- (*CopyBackupRequest)(nil), // 39: google.spanner.admin.database.v1.CopyBackupRequest
- (*GetBackupRequest)(nil), // 40: google.spanner.admin.database.v1.GetBackupRequest
- (*UpdateBackupRequest)(nil), // 41: google.spanner.admin.database.v1.UpdateBackupRequest
- (*DeleteBackupRequest)(nil), // 42: google.spanner.admin.database.v1.DeleteBackupRequest
- (*ListBackupsRequest)(nil), // 43: google.spanner.admin.database.v1.ListBackupsRequest
- (*ListBackupOperationsRequest)(nil), // 44: google.spanner.admin.database.v1.ListBackupOperationsRequest
- (*CreateBackupScheduleRequest)(nil), // 45: google.spanner.admin.database.v1.CreateBackupScheduleRequest
- (*GetBackupScheduleRequest)(nil), // 46: google.spanner.admin.database.v1.GetBackupScheduleRequest
- (*UpdateBackupScheduleRequest)(nil), // 47: google.spanner.admin.database.v1.UpdateBackupScheduleRequest
- (*DeleteBackupScheduleRequest)(nil), // 48: google.spanner.admin.database.v1.DeleteBackupScheduleRequest
- (*ListBackupSchedulesRequest)(nil), // 49: google.spanner.admin.database.v1.ListBackupSchedulesRequest
- (*emptypb.Empty)(nil), // 50: google.protobuf.Empty
- (*iampb.Policy)(nil), // 51: google.iam.v1.Policy
- (*iampb.TestIamPermissionsResponse)(nil), // 52: google.iam.v1.TestIamPermissionsResponse
- (*Backup)(nil), // 53: google.spanner.admin.database.v1.Backup
- (*ListBackupsResponse)(nil), // 54: google.spanner.admin.database.v1.ListBackupsResponse
- (*ListBackupOperationsResponse)(nil), // 55: google.spanner.admin.database.v1.ListBackupOperationsResponse
- (*BackupSchedule)(nil), // 56: google.spanner.admin.database.v1.BackupSchedule
- (*ListBackupSchedulesResponse)(nil), // 57: google.spanner.admin.database.v1.ListBackupSchedulesResponse
-}
-var file_google_spanner_admin_database_v1_spanner_database_admin_proto_depIdxs = []int32{
- 0, // 0: google.spanner.admin.database.v1.RestoreInfo.source_type:type_name -> google.spanner.admin.database.v1.RestoreSourceType
- 27, // 1: google.spanner.admin.database.v1.RestoreInfo.backup_info:type_name -> google.spanner.admin.database.v1.BackupInfo
- 1, // 2: google.spanner.admin.database.v1.Database.state:type_name -> google.spanner.admin.database.v1.Database.State
- 28, // 3: google.spanner.admin.database.v1.Database.create_time:type_name -> google.protobuf.Timestamp
- 3, // 4: google.spanner.admin.database.v1.Database.restore_info:type_name -> google.spanner.admin.database.v1.RestoreInfo
- 29, // 5: google.spanner.admin.database.v1.Database.encryption_config:type_name -> google.spanner.admin.database.v1.EncryptionConfig
- 30, // 6: google.spanner.admin.database.v1.Database.encryption_info:type_name -> google.spanner.admin.database.v1.EncryptionInfo
- 28, // 7: google.spanner.admin.database.v1.Database.earliest_version_time:type_name -> google.protobuf.Timestamp
- 31, // 8: google.spanner.admin.database.v1.Database.database_dialect:type_name -> google.spanner.admin.database.v1.DatabaseDialect
- 4, // 9: google.spanner.admin.database.v1.ListDatabasesResponse.databases:type_name -> google.spanner.admin.database.v1.Database
- 29, // 10: google.spanner.admin.database.v1.CreateDatabaseRequest.encryption_config:type_name -> google.spanner.admin.database.v1.EncryptionConfig
- 31, // 11: google.spanner.admin.database.v1.CreateDatabaseRequest.database_dialect:type_name -> google.spanner.admin.database.v1.DatabaseDialect
- 4, // 12: google.spanner.admin.database.v1.UpdateDatabaseRequest.database:type_name -> google.spanner.admin.database.v1.Database
- 32, // 13: google.spanner.admin.database.v1.UpdateDatabaseRequest.update_mask:type_name -> google.protobuf.FieldMask
- 10, // 14: google.spanner.admin.database.v1.UpdateDatabaseMetadata.request:type_name -> google.spanner.admin.database.v1.UpdateDatabaseRequest
- 33, // 15: google.spanner.admin.database.v1.UpdateDatabaseMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
- 28, // 16: google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 28, // 17: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.commit_timestamps:type_name -> google.protobuf.Timestamp
- 33, // 18: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
- 13, // 19: google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata.actions:type_name -> google.spanner.admin.database.v1.DdlStatementActionInfo
- 34, // 20: google.spanner.admin.database.v1.ListDatabaseOperationsResponse.operations:type_name -> google.longrunning.Operation
- 21, // 21: google.spanner.admin.database.v1.RestoreDatabaseRequest.encryption_config:type_name -> google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig
- 2, // 22: google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.encryption_type:type_name -> google.spanner.admin.database.v1.RestoreDatabaseEncryptionConfig.EncryptionType
- 0, // 23: google.spanner.admin.database.v1.RestoreDatabaseMetadata.source_type:type_name -> google.spanner.admin.database.v1.RestoreSourceType
- 27, // 24: google.spanner.admin.database.v1.RestoreDatabaseMetadata.backup_info:type_name -> google.spanner.admin.database.v1.BackupInfo
- 33, // 25: google.spanner.admin.database.v1.RestoreDatabaseMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
- 28, // 26: google.spanner.admin.database.v1.RestoreDatabaseMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 33, // 27: google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.progress:type_name -> google.spanner.admin.database.v1.OperationProgress
- 24, // 28: google.spanner.admin.database.v1.ListDatabaseRolesResponse.database_roles:type_name -> google.spanner.admin.database.v1.DatabaseRole
- 5, // 29: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases:input_type -> google.spanner.admin.database.v1.ListDatabasesRequest
- 7, // 30: google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase:input_type -> google.spanner.admin.database.v1.CreateDatabaseRequest
- 9, // 31: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase:input_type -> google.spanner.admin.database.v1.GetDatabaseRequest
- 10, // 32: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase:input_type -> google.spanner.admin.database.v1.UpdateDatabaseRequest
- 12, // 33: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl:input_type -> google.spanner.admin.database.v1.UpdateDatabaseDdlRequest
- 15, // 34: google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase:input_type -> google.spanner.admin.database.v1.DropDatabaseRequest
- 16, // 35: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl:input_type -> google.spanner.admin.database.v1.GetDatabaseDdlRequest
- 35, // 36: google.spanner.admin.database.v1.DatabaseAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
- 36, // 37: google.spanner.admin.database.v1.DatabaseAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
- 37, // 38: google.spanner.admin.database.v1.DatabaseAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
- 38, // 39: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup:input_type -> google.spanner.admin.database.v1.CreateBackupRequest
- 39, // 40: google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup:input_type -> google.spanner.admin.database.v1.CopyBackupRequest
- 40, // 41: google.spanner.admin.database.v1.DatabaseAdmin.GetBackup:input_type -> google.spanner.admin.database.v1.GetBackupRequest
- 41, // 42: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup:input_type -> google.spanner.admin.database.v1.UpdateBackupRequest
- 42, // 43: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup:input_type -> google.spanner.admin.database.v1.DeleteBackupRequest
- 43, // 44: google.spanner.admin.database.v1.DatabaseAdmin.ListBackups:input_type -> google.spanner.admin.database.v1.ListBackupsRequest
- 20, // 45: google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase:input_type -> google.spanner.admin.database.v1.RestoreDatabaseRequest
- 18, // 46: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations:input_type -> google.spanner.admin.database.v1.ListDatabaseOperationsRequest
- 44, // 47: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations:input_type -> google.spanner.admin.database.v1.ListBackupOperationsRequest
- 25, // 48: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles:input_type -> google.spanner.admin.database.v1.ListDatabaseRolesRequest
- 45, // 49: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule:input_type -> google.spanner.admin.database.v1.CreateBackupScheduleRequest
- 46, // 50: google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule:input_type -> google.spanner.admin.database.v1.GetBackupScheduleRequest
- 47, // 51: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule:input_type -> google.spanner.admin.database.v1.UpdateBackupScheduleRequest
- 48, // 52: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule:input_type -> google.spanner.admin.database.v1.DeleteBackupScheduleRequest
- 49, // 53: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules:input_type -> google.spanner.admin.database.v1.ListBackupSchedulesRequest
- 6, // 54: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases:output_type -> google.spanner.admin.database.v1.ListDatabasesResponse
- 34, // 55: google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase:output_type -> google.longrunning.Operation
- 4, // 56: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase:output_type -> google.spanner.admin.database.v1.Database
- 34, // 57: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabase:output_type -> google.longrunning.Operation
- 34, // 58: google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl:output_type -> google.longrunning.Operation
- 50, // 59: google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase:output_type -> google.protobuf.Empty
- 17, // 60: google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl:output_type -> google.spanner.admin.database.v1.GetDatabaseDdlResponse
- 51, // 61: google.spanner.admin.database.v1.DatabaseAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy
- 51, // 62: google.spanner.admin.database.v1.DatabaseAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy
- 52, // 63: google.spanner.admin.database.v1.DatabaseAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
- 34, // 64: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup:output_type -> google.longrunning.Operation
- 34, // 65: google.spanner.admin.database.v1.DatabaseAdmin.CopyBackup:output_type -> google.longrunning.Operation
- 53, // 66: google.spanner.admin.database.v1.DatabaseAdmin.GetBackup:output_type -> google.spanner.admin.database.v1.Backup
- 53, // 67: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup:output_type -> google.spanner.admin.database.v1.Backup
- 50, // 68: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup:output_type -> google.protobuf.Empty
- 54, // 69: google.spanner.admin.database.v1.DatabaseAdmin.ListBackups:output_type -> google.spanner.admin.database.v1.ListBackupsResponse
- 34, // 70: google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase:output_type -> google.longrunning.Operation
- 19, // 71: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations:output_type -> google.spanner.admin.database.v1.ListDatabaseOperationsResponse
- 55, // 72: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations:output_type -> google.spanner.admin.database.v1.ListBackupOperationsResponse
- 26, // 73: google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseRoles:output_type -> google.spanner.admin.database.v1.ListDatabaseRolesResponse
- 56, // 74: google.spanner.admin.database.v1.DatabaseAdmin.CreateBackupSchedule:output_type -> google.spanner.admin.database.v1.BackupSchedule
- 56, // 75: google.spanner.admin.database.v1.DatabaseAdmin.GetBackupSchedule:output_type -> google.spanner.admin.database.v1.BackupSchedule
- 56, // 76: google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackupSchedule:output_type -> google.spanner.admin.database.v1.BackupSchedule
- 50, // 77: google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackupSchedule:output_type -> google.protobuf.Empty
- 57, // 78: google.spanner.admin.database.v1.DatabaseAdmin.ListBackupSchedules:output_type -> google.spanner.admin.database.v1.ListBackupSchedulesResponse
- 54, // [54:79] is the sub-list for method output_type
- 29, // [29:54] is the sub-list for method input_type
- 29, // [29:29] is the sub-list for extension type_name
- 29, // [29:29] is the sub-list for extension extendee
- 0, // [0:29] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_admin_database_v1_spanner_database_admin_proto_init() }
-func file_google_spanner_admin_database_v1_spanner_database_admin_proto_init() {
- if File_google_spanner_admin_database_v1_spanner_database_admin_proto != nil {
- return
- }
- file_google_spanner_admin_database_v1_backup_proto_init()
- file_google_spanner_admin_database_v1_backup_schedule_proto_init()
- file_google_spanner_admin_database_v1_common_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Database); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*ListDatabasesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*ListDatabasesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*CreateDatabaseRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*CreateDatabaseMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*GetDatabaseRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateDatabaseRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateDatabaseMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateDatabaseDdlRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*DdlStatementActionInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateDatabaseDdlMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*DropDatabaseRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*GetDatabaseDdlRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*GetDatabaseDdlResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*ListDatabaseOperationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*ListDatabaseOperationsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreDatabaseRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreDatabaseEncryptionConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*RestoreDatabaseMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*OptimizeRestoredDatabaseMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*DatabaseRole); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*ListDatabaseRolesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*ListDatabaseRolesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[0].OneofWrappers = []any{
- (*RestoreInfo_BackupInfo)(nil),
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[17].OneofWrappers = []any{
- (*RestoreDatabaseRequest_Backup)(nil),
- }
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes[19].OneofWrappers = []any{
- (*RestoreDatabaseMetadata_BackupInfo)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc,
- NumEnums: 3,
- NumMessages: 24,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_google_spanner_admin_database_v1_spanner_database_admin_proto_goTypes,
- DependencyIndexes: file_google_spanner_admin_database_v1_spanner_database_admin_proto_depIdxs,
- EnumInfos: file_google_spanner_admin_database_v1_spanner_database_admin_proto_enumTypes,
- MessageInfos: file_google_spanner_admin_database_v1_spanner_database_admin_proto_msgTypes,
- }.Build()
- File_google_spanner_admin_database_v1_spanner_database_admin_proto = out.File
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_rawDesc = nil
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_goTypes = nil
- file_google_spanner_admin_database_v1_spanner_database_admin_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// DatabaseAdminClient is the client API for DatabaseAdmin service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type DatabaseAdminClient interface {
- // Lists Cloud Spanner databases.
- ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error)
- // Creates a new Cloud Spanner database and starts to prepare it for serving.
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `<database_name>/operations/<operation_id>` and
- // can be used to track preparation of the database. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Database][google.spanner.admin.database.v1.Database], if successful.
- CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Gets the state of a Cloud Spanner database.
- GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error)
- // Updates a Cloud Spanner database. The returned
- // [long-running operation][google.longrunning.Operation] can be used to track
- // the progress of updating the database. If the named database does not
- // exist, returns `NOT_FOUND`.
- //
- // While the operation is pending:
- //
- // - The database's
- // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
- // field is set to true.
- // - Cancelling the operation is best-effort. If the cancellation succeeds,
- // the operation metadata's
- // [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
- // is set, the updates are reverted, and the operation terminates with a
- // `CANCELLED` status.
- // - New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
- // until the pending operation is done (returns successfully or with
- // error).
- // - Reading the database via the API continues to give the pre-request
- // values.
- //
- // Upon completion of the returned operation:
- //
- // - The new values are in effect and readable via the API.
- // - The database's
- // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
- // field becomes false.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
- // and can be used to track the database modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Database][google.spanner.admin.database.v1.Database], if successful.
- UpdateDatabase(ctx context.Context, in *UpdateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Updates the schema of a Cloud Spanner database by
- // creating/altering/dropping tables, columns, indexes, etc. The returned
- // [long-running operation][google.longrunning.Operation] will have a name of
- // the format `<database_name>/operations/<operation_id>` and can be used to
- // track execution of the schema change(s). The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
- // The operation has no response.
- UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Drops (aka deletes) a Cloud Spanner database.
- // Completed backups for the database will be retained according to their
- // `expire_time`.
- // Note: Cloud Spanner might continue to accept requests for a few seconds
- // after the database has been deleted.
- DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Returns the schema of a Cloud Spanner database as a list of formatted
- // DDL statements. This method does not show pending schema updates, those may
- // be queried using the [Operations][google.longrunning.Operations] API.
- GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error)
- // Sets the access control policy on a database or backup resource.
- // Replaces any existing policy.
- //
- // Authorization requires `spanner.databases.setIamPolicy`
- // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- // For backups, authorization requires `spanner.backups.setIamPolicy`
- // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Gets the access control policy for a database or backup resource.
- // Returns an empty policy if a database or backup exists but does not have a
- // policy set.
- //
- // Authorization requires `spanner.databases.getIamPolicy` permission on
- // [resource][google.iam.v1.GetIamPolicyRequest.resource].
- // For backups, authorization requires `spanner.backups.getIamPolicy`
- // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
- GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Returns permissions that the caller has on the specified database or backup
- // resource.
- //
- // Attempting this RPC on a non-existent Cloud Spanner database will
- // result in a NOT_FOUND error if the user has
- // `spanner.databases.list` permission on the containing Cloud
- // Spanner instance. Otherwise returns an empty set of permissions.
- // Calling this method on a backup that does not exist will
- // result in a NOT_FOUND error if the user has
- // `spanner.backups.list` permission on the containing instance.
- TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
- // Starts creating a new Cloud Spanner Backup.
- // The returned backup [long-running operation][google.longrunning.Operation]
- // will have a name of the format
- // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
- // and can be used to track creation of the backup. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Backup][google.spanner.admin.database.v1.Backup], if successful.
- // Cancelling the returned operation will stop the creation and delete the
- // backup. There can be only one pending backup creation per database. Backup
- // creation of different databases can run concurrently.
- CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Starts copying a Cloud Spanner Backup.
- // The returned backup [long-running operation][google.longrunning.Operation]
- // will have a name of the format
- // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
- // and can be used to track copying of the backup. The operation is associated
- // with the destination backup.
- // The [metadata][google.longrunning.Operation.metadata] field type is
- // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Backup][google.spanner.admin.database.v1.Backup], if successful.
- // Cancelling the returned operation will stop the copying and delete the
- // destination backup. Concurrent CopyBackup requests can run on the same
- // source backup.
- CopyBackup(ctx context.Context, in *CopyBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Gets metadata on a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error)
- // Updates a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- UpdateBackup(ctx context.Context, in *UpdateBackupRequest, opts ...grpc.CallOption) (*Backup, error)
- // Deletes a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Lists completed and pending backups.
- // Backups returned are ordered by `create_time` in descending order,
- // starting from the most recent `create_time`.
- ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error)
- // Create a new database by restoring from a completed backup. The new
- // database must be in the same project and in an instance with the same
- // instance configuration as the instance containing
- // the backup. The returned database [long-running
- // operation][google.longrunning.Operation] has a name of the format
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
- // and can be used to track the progress of the operation, and to cancel it.
- // The [metadata][google.longrunning.Operation.metadata] field type is
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] type
- // is [Database][google.spanner.admin.database.v1.Database], if
- // successful. Cancelling the returned operation will stop the restore and
- // delete the database.
- // There can be only one database being restored into an instance at a time.
- // Once the restore operation completes, a new restore operation can be
- // initiated, without waiting for the optimize operation associated with the
- // first restore to complete.
- RestoreDatabase(ctx context.Context, in *RestoreDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Lists database [longrunning-operations][google.longrunning.Operation].
- // A database operation has a name of the form
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations.
- ListDatabaseOperations(ctx context.Context, in *ListDatabaseOperationsRequest, opts ...grpc.CallOption) (*ListDatabaseOperationsResponse, error)
- // Lists the backup [long-running operations][google.longrunning.Operation] in
- // the given instance. A backup operation has a name of the form
- // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.progress.start_time` in descending order starting
- // from the most recently started operation.
- ListBackupOperations(ctx context.Context, in *ListBackupOperationsRequest, opts ...grpc.CallOption) (*ListBackupOperationsResponse, error)
- // Lists Cloud Spanner database roles.
- ListDatabaseRoles(ctx context.Context, in *ListDatabaseRolesRequest, opts ...grpc.CallOption) (*ListDatabaseRolesResponse, error)
- // Creates a new backup schedule.
- CreateBackupSchedule(ctx context.Context, in *CreateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error)
- // Gets backup schedule for the input schedule name.
- GetBackupSchedule(ctx context.Context, in *GetBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error)
- // Updates a backup schedule.
- UpdateBackupSchedule(ctx context.Context, in *UpdateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error)
- // Deletes a backup schedule.
- DeleteBackupSchedule(ctx context.Context, in *DeleteBackupScheduleRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Lists all the backup schedules for the database.
- ListBackupSchedules(ctx context.Context, in *ListBackupSchedulesRequest, opts ...grpc.CallOption) (*ListBackupSchedulesResponse, error)
-}
-
-type databaseAdminClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewDatabaseAdminClient(cc grpc.ClientConnInterface) DatabaseAdminClient {
- return &databaseAdminClient{cc}
-}
-
-func (c *databaseAdminClient) ListDatabases(ctx context.Context, in *ListDatabasesRequest, opts ...grpc.CallOption) (*ListDatabasesResponse, error) {
- out := new(ListDatabasesResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) CreateDatabase(ctx context.Context, in *CreateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) GetDatabase(ctx context.Context, in *GetDatabaseRequest, opts ...grpc.CallOption) (*Database, error) {
- out := new(Database)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) UpdateDatabase(ctx context.Context, in *UpdateDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) UpdateDatabaseDdl(ctx context.Context, in *UpdateDatabaseDdlRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) DropDatabase(ctx context.Context, in *DropDatabaseRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) GetDatabaseDdl(ctx context.Context, in *GetDatabaseDdlRequest, opts ...grpc.CallOption) (*GetDatabaseDdlResponse, error) {
- out := new(GetDatabaseDdlResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- out := new(iampb.TestIamPermissionsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) CopyBackup(ctx context.Context, in *CopyBackupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) {
- out := new(Backup)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) UpdateBackup(ctx context.Context, in *UpdateBackupRequest, opts ...grpc.CallOption) (*Backup, error) {
- out := new(Backup)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) DeleteBackup(ctx context.Context, in *DeleteBackupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) {
- out := new(ListBackupsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) RestoreDatabase(ctx context.Context, in *RestoreDatabaseRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) ListDatabaseOperations(ctx context.Context, in *ListDatabaseOperationsRequest, opts ...grpc.CallOption) (*ListDatabaseOperationsResponse, error) {
- out := new(ListDatabaseOperationsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) ListBackupOperations(ctx context.Context, in *ListBackupOperationsRequest, opts ...grpc.CallOption) (*ListBackupOperationsResponse, error) {
- out := new(ListBackupOperationsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) ListDatabaseRoles(ctx context.Context, in *ListDatabaseRolesRequest, opts ...grpc.CallOption) (*ListDatabaseRolesResponse, error) {
- out := new(ListDatabaseRolesResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) CreateBackupSchedule(ctx context.Context, in *CreateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error) {
- out := new(BackupSchedule)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) GetBackupSchedule(ctx context.Context, in *GetBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error) {
- out := new(BackupSchedule)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) UpdateBackupSchedule(ctx context.Context, in *UpdateBackupScheduleRequest, opts ...grpc.CallOption) (*BackupSchedule, error) {
- out := new(BackupSchedule)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) DeleteBackupSchedule(ctx context.Context, in *DeleteBackupScheduleRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *databaseAdminClient) ListBackupSchedules(ctx context.Context, in *ListBackupSchedulesRequest, opts ...grpc.CallOption) (*ListBackupSchedulesResponse, error) {
- out := new(ListBackupSchedulesResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// DatabaseAdminServer is the server API for DatabaseAdmin service.
-type DatabaseAdminServer interface {
- // Lists Cloud Spanner databases.
- ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error)
- // Creates a new Cloud Spanner database and starts to prepare it for serving.
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `<database_name>/operations/<operation_id>` and
- // can be used to track preparation of the database. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Database][google.spanner.admin.database.v1.Database], if successful.
- CreateDatabase(context.Context, *CreateDatabaseRequest) (*longrunningpb.Operation, error)
- // Gets the state of a Cloud Spanner database.
- GetDatabase(context.Context, *GetDatabaseRequest) (*Database, error)
- // Updates a Cloud Spanner database. The returned
- // [long-running operation][google.longrunning.Operation] can be used to track
- // the progress of updating the database. If the named database does not
- // exist, returns `NOT_FOUND`.
- //
- // While the operation is pending:
- //
- // - The database's
- // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
- // field is set to true.
- // - Cancelling the operation is best-effort. If the cancellation succeeds,
- // the operation metadata's
- // [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time]
- // is set, the updates are reverted, and the operation terminates with a
- // `CANCELLED` status.
- // - New UpdateDatabase requests will return a `FAILED_PRECONDITION` error
- // until the pending operation is done (returns successfully or with
- // error).
- // - Reading the database via the API continues to give the pre-request
- // values.
- //
- // Upon completion of the returned operation:
- //
- // - The new values are in effect and readable via the API.
- // - The database's
- // [reconciling][google.spanner.admin.database.v1.Database.reconciling]
- // field becomes false.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`
- // and can be used to track the database modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Database][google.spanner.admin.database.v1.Database], if successful.
- UpdateDatabase(context.Context, *UpdateDatabaseRequest) (*longrunningpb.Operation, error)
- // Updates the schema of a Cloud Spanner database by
- // creating/altering/dropping tables, columns, indexes, etc. The returned
- // [long-running operation][google.longrunning.Operation] will have a name of
- // the format `<database_name>/operations/<operation_id>` and can be used to
- // track execution of the schema change(s). The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata].
- // The operation has no response.
- UpdateDatabaseDdl(context.Context, *UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error)
- // Drops (aka deletes) a Cloud Spanner database.
- // Completed backups for the database will be retained according to their
- // `expire_time`.
- // Note: Cloud Spanner might continue to accept requests for a few seconds
- // after the database has been deleted.
- DropDatabase(context.Context, *DropDatabaseRequest) (*emptypb.Empty, error)
- // Returns the schema of a Cloud Spanner database as a list of formatted
- // DDL statements. This method does not show pending schema updates, those may
- // be queried using the [Operations][google.longrunning.Operations] API.
- GetDatabaseDdl(context.Context, *GetDatabaseDdlRequest) (*GetDatabaseDdlResponse, error)
- // Sets the access control policy on a database or backup resource.
- // Replaces any existing policy.
- //
- // Authorization requires `spanner.databases.setIamPolicy`
- // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- // For backups, authorization requires `spanner.backups.setIamPolicy`
- // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource].
- SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
- // Gets the access control policy for a database or backup resource.
- // Returns an empty policy if a database or backup exists but does not have a
- // policy set.
- //
- // Authorization requires `spanner.databases.getIamPolicy` permission on
- // [resource][google.iam.v1.GetIamPolicyRequest.resource].
- // For backups, authorization requires `spanner.backups.getIamPolicy`
- // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource].
- GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error)
- // Returns permissions that the caller has on the specified database or backup
- // resource.
- //
- // Attempting this RPC on a non-existent Cloud Spanner database will
- // result in a NOT_FOUND error if the user has
- // `spanner.databases.list` permission on the containing Cloud
- // Spanner instance. Otherwise returns an empty set of permissions.
- // Calling this method on a backup that does not exist will
- // result in a NOT_FOUND error if the user has
- // `spanner.backups.list` permission on the containing instance.
- TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
- // Starts creating a new Cloud Spanner Backup.
- // The returned backup [long-running operation][google.longrunning.Operation]
- // will have a name of the format
- // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
- // and can be used to track creation of the backup. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Backup][google.spanner.admin.database.v1.Backup], if successful.
- // Cancelling the returned operation will stop the creation and delete the
- // backup. There can be only one pending backup creation per database. Backup
- // creation of different databases can run concurrently.
- CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error)
- // Starts copying a Cloud Spanner Backup.
- // The returned backup [long-running operation][google.longrunning.Operation]
- // will have a name of the format
- // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation_id>`
- // and can be used to track copying of the backup. The operation is associated
- // with the destination backup.
- // The [metadata][google.longrunning.Operation.metadata] field type is
- // [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Backup][google.spanner.admin.database.v1.Backup], if successful.
- // Cancelling the returned operation will stop the copying and delete the
- // destination backup. Concurrent CopyBackup requests can run on the same
- // source backup.
- CopyBackup(context.Context, *CopyBackupRequest) (*longrunningpb.Operation, error)
- // Gets metadata on a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- GetBackup(context.Context, *GetBackupRequest) (*Backup, error)
- // Updates a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- UpdateBackup(context.Context, *UpdateBackupRequest) (*Backup, error)
- // Deletes a pending or completed
- // [Backup][google.spanner.admin.database.v1.Backup].
- DeleteBackup(context.Context, *DeleteBackupRequest) (*emptypb.Empty, error)
- // Lists completed and pending backups.
- // Backups returned are ordered by `create_time` in descending order,
- // starting from the most recent `create_time`.
- ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error)
- // Create a new database by restoring from a completed backup. The new
- // database must be in the same project and in an instance with the same
- // instance configuration as the instance containing
- // the backup. The returned database [long-running
- // operation][google.longrunning.Operation] has a name of the format
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation_id>`,
- // and can be used to track the progress of the operation, and to cancel it.
- // The [metadata][google.longrunning.Operation.metadata] field type is
- // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata].
- // The [response][google.longrunning.Operation.response] type
- // is [Database][google.spanner.admin.database.v1.Database], if
- // successful. Cancelling the returned operation will stop the restore and
- // delete the database.
- // There can be only one database being restored into an instance at a time.
- // Once the restore operation completes, a new restore operation can be
- // initiated, without waiting for the optimize operation associated with the
- // first restore to complete.
- RestoreDatabase(context.Context, *RestoreDatabaseRequest) (*longrunningpb.Operation, error)
- // Lists database [longrunning-operations][google.longrunning.Operation].
- // A database operation has a name of the form
- // `projects/<project>/instances/<instance>/databases/<database>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations.
- ListDatabaseOperations(context.Context, *ListDatabaseOperationsRequest) (*ListDatabaseOperationsResponse, error)
- // Lists the backup [long-running operations][google.longrunning.Operation] in
- // the given instance. A backup operation has a name of the form
- // `projects/<project>/instances/<instance>/backups/<backup>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.progress.start_time` in descending order starting
- // from the most recently started operation.
- ListBackupOperations(context.Context, *ListBackupOperationsRequest) (*ListBackupOperationsResponse, error)
- // Lists Cloud Spanner database roles.
- ListDatabaseRoles(context.Context, *ListDatabaseRolesRequest) (*ListDatabaseRolesResponse, error)
- // Creates a new backup schedule.
- CreateBackupSchedule(context.Context, *CreateBackupScheduleRequest) (*BackupSchedule, error)
- // Gets backup schedule for the input schedule name.
- GetBackupSchedule(context.Context, *GetBackupScheduleRequest) (*BackupSchedule, error)
- // Updates a backup schedule.
- UpdateBackupSchedule(context.Context, *UpdateBackupScheduleRequest) (*BackupSchedule, error)
- // Deletes a backup schedule.
- DeleteBackupSchedule(context.Context, *DeleteBackupScheduleRequest) (*emptypb.Empty, error)
- // Lists all the backup schedules for the database.
- ListBackupSchedules(context.Context, *ListBackupSchedulesRequest) (*ListBackupSchedulesResponse, error)
-}
-
-// UnimplementedDatabaseAdminServer can be embedded to have forward compatible implementations.
-type UnimplementedDatabaseAdminServer struct {
-}
-
-func (*UnimplementedDatabaseAdminServer) ListDatabases(context.Context, *ListDatabasesRequest) (*ListDatabasesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListDatabases not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) CreateDatabase(context.Context, *CreateDatabaseRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateDatabase not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) GetDatabase(context.Context, *GetDatabaseRequest) (*Database, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetDatabase not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) UpdateDatabase(context.Context, *UpdateDatabaseRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateDatabase not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) UpdateDatabaseDdl(context.Context, *UpdateDatabaseDdlRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateDatabaseDdl not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) DropDatabase(context.Context, *DropDatabaseRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DropDatabase not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) GetDatabaseDdl(context.Context, *GetDatabaseDdlRequest) (*GetDatabaseDdlResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetDatabaseDdl not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) CreateBackup(context.Context, *CreateBackupRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateBackup not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) CopyBackup(context.Context, *CopyBackupRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CopyBackup not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) GetBackup(context.Context, *GetBackupRequest) (*Backup, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetBackup not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) UpdateBackup(context.Context, *UpdateBackupRequest) (*Backup, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateBackup not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) DeleteBackup(context.Context, *DeleteBackupRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteBackup not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListBackups not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) RestoreDatabase(context.Context, *RestoreDatabaseRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RestoreDatabase not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) ListDatabaseOperations(context.Context, *ListDatabaseOperationsRequest) (*ListDatabaseOperationsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListDatabaseOperations not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) ListBackupOperations(context.Context, *ListBackupOperationsRequest) (*ListBackupOperationsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListBackupOperations not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) ListDatabaseRoles(context.Context, *ListDatabaseRolesRequest) (*ListDatabaseRolesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListDatabaseRoles not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) CreateBackupSchedule(context.Context, *CreateBackupScheduleRequest) (*BackupSchedule, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateBackupSchedule not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) GetBackupSchedule(context.Context, *GetBackupScheduleRequest) (*BackupSchedule, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetBackupSchedule not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) UpdateBackupSchedule(context.Context, *UpdateBackupScheduleRequest) (*BackupSchedule, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateBackupSchedule not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) DeleteBackupSchedule(context.Context, *DeleteBackupScheduleRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteBackupSchedule not implemented")
-}
-func (*UnimplementedDatabaseAdminServer) ListBackupSchedules(context.Context, *ListBackupSchedulesRequest) (*ListBackupSchedulesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListBackupSchedules not implemented")
-}
-
-func RegisterDatabaseAdminServer(s *grpc.Server, srv DatabaseAdminServer) {
- s.RegisterService(&_DatabaseAdmin_serviceDesc, srv)
-}
-
-func _DatabaseAdmin_ListDatabases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListDatabasesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).ListDatabases(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).ListDatabases(ctx, req.(*ListDatabasesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_CreateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateDatabaseRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).CreateDatabase(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).CreateDatabase(ctx, req.(*CreateDatabaseRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_GetDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetDatabaseRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).GetDatabase(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).GetDatabase(ctx, req.(*GetDatabaseRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_UpdateDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateDatabaseRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).UpdateDatabase(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).UpdateDatabase(ctx, req.(*UpdateDatabaseRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_UpdateDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateDatabaseDdlRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).UpdateDatabaseDdl(ctx, req.(*UpdateDatabaseDdlRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_DropDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DropDatabaseRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).DropDatabase(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).DropDatabase(ctx, req.(*DropDatabaseRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_GetDatabaseDdl_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetDatabaseDdlRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).GetDatabaseDdl(ctx, req.(*GetDatabaseDdlRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(iampb.SetIamPolicyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).SetIamPolicy(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(iampb.GetIamPolicyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).GetIamPolicy(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(iampb.TestIamPermissionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).TestIamPermissions(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_CreateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateBackupRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).CreateBackup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).CreateBackup(ctx, req.(*CreateBackupRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_CopyBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CopyBackupRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).CopyBackup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).CopyBackup(ctx, req.(*CopyBackupRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_GetBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetBackupRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).GetBackup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).GetBackup(ctx, req.(*GetBackupRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_UpdateBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateBackupRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).UpdateBackup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).UpdateBackup(ctx, req.(*UpdateBackupRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_DeleteBackup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteBackupRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).DeleteBackup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).DeleteBackup(ctx, req.(*DeleteBackupRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_ListBackups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListBackupsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).ListBackups(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).ListBackups(ctx, req.(*ListBackupsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_RestoreDatabase_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RestoreDatabaseRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).RestoreDatabase(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).RestoreDatabase(ctx, req.(*RestoreDatabaseRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_ListDatabaseOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListDatabaseOperationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).ListDatabaseOperations(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).ListDatabaseOperations(ctx, req.(*ListDatabaseOperationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_ListBackupOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListBackupOperationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).ListBackupOperations(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).ListBackupOperations(ctx, req.(*ListBackupOperationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_ListDatabaseRoles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListDatabaseRolesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).ListDatabaseRoles(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).ListDatabaseRoles(ctx, req.(*ListDatabaseRolesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_CreateBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateBackupScheduleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).CreateBackupSchedule(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).CreateBackupSchedule(ctx, req.(*CreateBackupScheduleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_GetBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetBackupScheduleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).GetBackupSchedule(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).GetBackupSchedule(ctx, req.(*GetBackupScheduleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_UpdateBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateBackupScheduleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).UpdateBackupSchedule(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).UpdateBackupSchedule(ctx, req.(*UpdateBackupScheduleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_DeleteBackupSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteBackupScheduleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).DeleteBackupSchedule(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).DeleteBackupSchedule(ctx, req.(*DeleteBackupScheduleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _DatabaseAdmin_ListBackupSchedules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListBackupSchedulesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(DatabaseAdminServer).ListBackupSchedules(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(DatabaseAdminServer).ListBackupSchedules(ctx, req.(*ListBackupSchedulesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _DatabaseAdmin_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.spanner.admin.database.v1.DatabaseAdmin",
- HandlerType: (*DatabaseAdminServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "ListDatabases",
- Handler: _DatabaseAdmin_ListDatabases_Handler,
- },
- {
- MethodName: "CreateDatabase",
- Handler: _DatabaseAdmin_CreateDatabase_Handler,
- },
- {
- MethodName: "GetDatabase",
- Handler: _DatabaseAdmin_GetDatabase_Handler,
- },
- {
- MethodName: "UpdateDatabase",
- Handler: _DatabaseAdmin_UpdateDatabase_Handler,
- },
- {
- MethodName: "UpdateDatabaseDdl",
- Handler: _DatabaseAdmin_UpdateDatabaseDdl_Handler,
- },
- {
- MethodName: "DropDatabase",
- Handler: _DatabaseAdmin_DropDatabase_Handler,
- },
- {
- MethodName: "GetDatabaseDdl",
- Handler: _DatabaseAdmin_GetDatabaseDdl_Handler,
- },
- {
- MethodName: "SetIamPolicy",
- Handler: _DatabaseAdmin_SetIamPolicy_Handler,
- },
- {
- MethodName: "GetIamPolicy",
- Handler: _DatabaseAdmin_GetIamPolicy_Handler,
- },
- {
- MethodName: "TestIamPermissions",
- Handler: _DatabaseAdmin_TestIamPermissions_Handler,
- },
- {
- MethodName: "CreateBackup",
- Handler: _DatabaseAdmin_CreateBackup_Handler,
- },
- {
- MethodName: "CopyBackup",
- Handler: _DatabaseAdmin_CopyBackup_Handler,
- },
- {
- MethodName: "GetBackup",
- Handler: _DatabaseAdmin_GetBackup_Handler,
- },
- {
- MethodName: "UpdateBackup",
- Handler: _DatabaseAdmin_UpdateBackup_Handler,
- },
- {
- MethodName: "DeleteBackup",
- Handler: _DatabaseAdmin_DeleteBackup_Handler,
- },
- {
- MethodName: "ListBackups",
- Handler: _DatabaseAdmin_ListBackups_Handler,
- },
- {
- MethodName: "RestoreDatabase",
- Handler: _DatabaseAdmin_RestoreDatabase_Handler,
- },
- {
- MethodName: "ListDatabaseOperations",
- Handler: _DatabaseAdmin_ListDatabaseOperations_Handler,
- },
- {
- MethodName: "ListBackupOperations",
- Handler: _DatabaseAdmin_ListBackupOperations_Handler,
- },
- {
- MethodName: "ListDatabaseRoles",
- Handler: _DatabaseAdmin_ListDatabaseRoles_Handler,
- },
- {
- MethodName: "CreateBackupSchedule",
- Handler: _DatabaseAdmin_CreateBackupSchedule_Handler,
- },
- {
- MethodName: "GetBackupSchedule",
- Handler: _DatabaseAdmin_GetBackupSchedule_Handler,
- },
- {
- MethodName: "UpdateBackupSchedule",
- Handler: _DatabaseAdmin_UpdateBackupSchedule_Handler,
- },
- {
- MethodName: "DeleteBackupSchedule",
- Handler: _DatabaseAdmin_DeleteBackupSchedule_Handler,
- },
- {
- MethodName: "ListBackupSchedules",
- Handler: _DatabaseAdmin_ListBackupSchedules_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "google/spanner/admin/database/v1/spanner_database_admin.proto",
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go
deleted file mode 100644
index 52f0ffb6b..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-// Package database is an auto-generated package for the
-// Cloud Spanner API.
-//
-// Cloud Spanner is a managed, mission-critical, globally consistent and
-// scalable relational database service.
-//
-// # General documentation
-//
-// For information that is relevant for all client libraries please reference
-// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
-// page includes:
-//
-// - [Authentication and Authorization]
-// - [Timeouts and Cancellation]
-// - [Testing against Client Libraries]
-// - [Debugging Client Libraries]
-// - [Inspecting errors]
-//
-// # Example usage
-//
-// To get started with this package, create a client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := database.NewDatabaseAdminClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// The client will use your default application credentials. Clients should be reused instead of created as needed.
-// The methods of Client are safe for concurrent use by multiple goroutines.
-// The returned client must be Closed when it is done being used.
-//
-// # Using the Client
-//
-// The following is an example of making an API call with the newly created client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := database.NewDatabaseAdminClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// req := &databasepb.CopyBackupRequest{
-// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/spanner/admin/database/apiv1/databasepb#CopyBackupRequest.
-// }
-// op, err := c.CopyBackup(ctx, req)
-// if err != nil {
-// // TODO: Handle error.
-// }
-//
-// resp, err := op.Wait(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// // TODO: Use resp.
-// _ = resp
-//
-// # Use of Context
-//
-// The ctx passed to NewDatabaseAdminClient is used for authentication requests and
-// for creating the underlying connection, but is not used for subsequent calls.
-// Individual methods on the client use the ctx given to them.
-//
-// To close the open connection, use the Close() method.
-//
-// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
-// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
-// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
-// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
-// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
-package database // import "cloud.google.com/go/spanner/admin/database/apiv1"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/spanner.admin",
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go
deleted file mode 100644
index 60c190565..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/init.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2020 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package database
-
-import (
- "context"
- "os"
-
- "google.golang.org/api/option"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-func init() {
- newDatabaseAdminClientHook = func(ctx context.Context, p clientHookParams) ([]option.ClientOption, error) {
- if emulator := os.Getenv("SPANNER_EMULATOR_HOST"); emulator != "" {
- return []option.ClientOption{
- option.WithEndpoint(emulator),
- option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
- option.WithoutAuthentication(),
- }, nil
- }
-
- return nil, nil
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go
deleted file mode 100644
index d474b2cce..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/path_funcs.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package database
-
-// DatabaseAdminInstancePath returns the path for the instance resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s/instances/%s", project, instance)
-//
-// instead.
-func DatabaseAdminInstancePath(project, instance string) string {
- return "" +
- "projects/" +
- project +
- "/instances/" +
- instance +
- ""
-}
-
-// DatabaseAdminDatabasePath returns the path for the database resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, instance, database)
-//
-// instead.
-func DatabaseAdminDatabasePath(project, instance, database string) string {
- return "" +
- "projects/" +
- project +
- "/instances/" +
- instance +
- "/databases/" +
- database +
- ""
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go
deleted file mode 100644
index b0ba71de8..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/version.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by gapicgen. DO NOT EDIT.
-
-package database
-
-import "cloud.google.com/go/spanner/internal"
-
-func init() {
- versionClient = internal.Version
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go
deleted file mode 100644
index f6bab7fe1..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/auxiliary.go
+++ /dev/null
@@ -1,664 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package instance
-
-import (
- "context"
- "time"
-
- "cloud.google.com/go/longrunning"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- instancepb "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/iterator"
-)
-
-// CreateInstanceConfigOperation manages a long-running operation from CreateInstanceConfig.
-type CreateInstanceConfigOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateInstanceConfigOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstanceConfig
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateInstanceConfigOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstanceConfig
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateInstanceConfigOperation) Metadata() (*instancepb.CreateInstanceConfigMetadata, error) {
- var meta instancepb.CreateInstanceConfigMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateInstanceConfigOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateInstanceConfigOperation) Name() string {
- return op.lro.Name()
-}
-
-// CreateInstanceOperation manages a long-running operation from CreateInstance.
-type CreateInstanceOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.Instance
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.Instance
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateInstanceOperation) Metadata() (*instancepb.CreateInstanceMetadata, error) {
- var meta instancepb.CreateInstanceMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateInstanceOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateInstanceOperation) Name() string {
- return op.lro.Name()
-}
-
-// CreateInstancePartitionOperation manages a long-running operation from CreateInstancePartition.
-type CreateInstancePartitionOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateInstancePartitionOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstancePartition
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateInstancePartitionOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstancePartition
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateInstancePartitionOperation) Metadata() (*instancepb.CreateInstancePartitionMetadata, error) {
- var meta instancepb.CreateInstancePartitionMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateInstancePartitionOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateInstancePartitionOperation) Name() string {
- return op.lro.Name()
-}
-
-// MoveInstanceOperation manages a long-running operation from MoveInstance.
-type MoveInstanceOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *MoveInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.MoveInstanceResponse, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.MoveInstanceResponse
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *MoveInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.MoveInstanceResponse, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.MoveInstanceResponse
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *MoveInstanceOperation) Metadata() (*instancepb.MoveInstanceMetadata, error) {
- var meta instancepb.MoveInstanceMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *MoveInstanceOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *MoveInstanceOperation) Name() string {
- return op.lro.Name()
-}
-
-// UpdateInstanceConfigOperation manages a long-running operation from UpdateInstanceConfig.
-type UpdateInstanceConfigOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *UpdateInstanceConfigOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstanceConfig
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *UpdateInstanceConfigOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstanceConfig
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *UpdateInstanceConfigOperation) Metadata() (*instancepb.UpdateInstanceConfigMetadata, error) {
- var meta instancepb.UpdateInstanceConfigMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *UpdateInstanceConfigOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *UpdateInstanceConfigOperation) Name() string {
- return op.lro.Name()
-}
-
-// UpdateInstanceOperation manages a long-running operation from UpdateInstance.
-type UpdateInstanceOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *UpdateInstanceOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.Instance
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *UpdateInstanceOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.Instance, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.Instance
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *UpdateInstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) {
- var meta instancepb.UpdateInstanceMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *UpdateInstanceOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *UpdateInstanceOperation) Name() string {
- return op.lro.Name()
-}
-
-// UpdateInstancePartitionOperation manages a long-running operation from UpdateInstancePartition.
-type UpdateInstancePartitionOperation struct {
- lro *longrunning.Operation
- pollPath string
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *UpdateInstancePartitionOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstancePartition
- if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil {
- return nil, err
- }
- return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *UpdateInstancePartitionOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- opts = append([]gax.CallOption{gax.WithPath(op.pollPath)}, opts...)
- var resp instancepb.InstancePartition
- if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
- return nil, err
- }
- if !op.Done() {
- return nil, nil
- }
- return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *UpdateInstancePartitionOperation) Metadata() (*instancepb.UpdateInstancePartitionMetadata, error) {
- var meta instancepb.UpdateInstancePartitionMetadata
- if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *UpdateInstancePartitionOperation) Done() bool {
- return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *UpdateInstancePartitionOperation) Name() string {
- return op.lro.Name()
-}
-
-// InstanceConfigIterator manages a stream of *instancepb.InstanceConfig.
-type InstanceConfigIterator struct {
- items []*instancepb.InstanceConfig
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) {
- var item *instancepb.InstanceConfig
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *InstanceConfigIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *InstanceConfigIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// InstanceIterator manages a stream of *instancepb.Instance.
-type InstanceIterator struct {
- items []*instancepb.Instance
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *InstanceIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *InstanceIterator) Next() (*instancepb.Instance, error) {
- var item *instancepb.Instance
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *InstanceIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *InstanceIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// InstancePartitionIterator manages a stream of *instancepb.InstancePartition.
-type InstancePartitionIterator struct {
- items []*instancepb.InstancePartition
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstancePartition, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *InstancePartitionIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *InstancePartitionIterator) Next() (*instancepb.InstancePartition, error) {
- var item *instancepb.InstancePartition
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *InstancePartitionIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *InstancePartitionIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// OperationIterator manages a stream of *longrunningpb.Operation.
-type OperationIterator struct {
- items []*longrunningpb.Operation
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *OperationIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *OperationIterator) Next() (*longrunningpb.Operation, error) {
- var item *longrunningpb.Operation
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *OperationIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *OperationIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go
deleted file mode 100644
index 4f103493b..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-// Package instance is an auto-generated package for the
-// Cloud Spanner Instance Admin API.
-//
-// # General documentation
-//
-// For information that is relevant for all client libraries please reference
-// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
-// page includes:
-//
-// - [Authentication and Authorization]
-// - [Timeouts and Cancellation]
-// - [Testing against Client Libraries]
-// - [Debugging Client Libraries]
-// - [Inspecting errors]
-//
-// # Example usage
-//
-// To get started with this package, create a client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := instance.NewInstanceAdminClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// The client will use your default application credentials. Clients should be reused instead of created as needed.
-// The methods of Client are safe for concurrent use by multiple goroutines.
-// The returned client must be Closed when it is done being used.
-//
-// # Using the Client
-//
-// The following is an example of making an API call with the newly created client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := instance.NewInstanceAdminClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// req := &instancepb.CreateInstanceRequest{
-// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb#CreateInstanceRequest.
-// }
-// op, err := c.CreateInstance(ctx, req)
-// if err != nil {
-// // TODO: Handle error.
-// }
-//
-// resp, err := op.Wait(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// // TODO: Use resp.
-// _ = resp
-//
-// # Use of Context
-//
-// The ctx passed to NewInstanceAdminClient is used for authentication requests and
-// for creating the underlying connection, but is not used for subsequent calls.
-// Individual methods on the client use the ctx given to them.
-//
-// To close the open connection, use the Close() method.
-//
-// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
-// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
-// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
-// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
-// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
-package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/spanner.admin",
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go
deleted file mode 100644
index 85046d971..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/init.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2020 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package instance
-
-import (
- "context"
- "os"
-
- "google.golang.org/api/option"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-func init() {
- newInstanceAdminClientHook = func(ctx context.Context, p clientHookParams) ([]option.ClientOption, error) {
- if emulator := os.Getenv("SPANNER_EMULATOR_HOST"); emulator != "" {
- return []option.ClientOption{
- option.WithEndpoint(emulator),
- option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
- option.WithoutAuthentication(),
- }, nil
- }
-
- return nil, nil
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go
deleted file mode 100644
index 30b533f6f..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go
+++ /dev/null
@@ -1,3660 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package instance
-
-import (
- "bytes"
- "context"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "time"
-
- iampb "cloud.google.com/go/iam/apiv1/iampb"
- "cloud.google.com/go/longrunning"
- lroauto "cloud.google.com/go/longrunning/autogen"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- instancepb "cloud.google.com/go/spanner/admin/instance/apiv1/instancepb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- httptransport "google.golang.org/api/transport/http"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-var newInstanceAdminClientHook clientHook
-
-// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient.
-type InstanceAdminCallOptions struct {
- ListInstanceConfigs []gax.CallOption
- GetInstanceConfig []gax.CallOption
- CreateInstanceConfig []gax.CallOption
- UpdateInstanceConfig []gax.CallOption
- DeleteInstanceConfig []gax.CallOption
- ListInstanceConfigOperations []gax.CallOption
- ListInstances []gax.CallOption
- ListInstancePartitions []gax.CallOption
- GetInstance []gax.CallOption
- CreateInstance []gax.CallOption
- UpdateInstance []gax.CallOption
- DeleteInstance []gax.CallOption
- SetIamPolicy []gax.CallOption
- GetIamPolicy []gax.CallOption
- TestIamPermissions []gax.CallOption
- GetInstancePartition []gax.CallOption
- CreateInstancePartition []gax.CallOption
- DeleteInstancePartition []gax.CallOption
- UpdateInstancePartition []gax.CallOption
- ListInstancePartitionOperations []gax.CallOption
- MoveInstance []gax.CallOption
-}
-
-func defaultInstanceAdminGRPCClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("spanner.googleapis.com:443"),
- internaloption.WithDefaultEndpointTemplate("spanner.UNIVERSE_DOMAIN:443"),
- internaloption.WithDefaultMTLSEndpoint("spanner.mtls.googleapis.com:443"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableJwtWithScope(),
- option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32))),
- }
-}
-
-func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions {
- return &InstanceAdminCallOptions{
- ListInstanceConfigs: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetInstanceConfig: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- CreateInstanceConfig: []gax.CallOption{},
- UpdateInstanceConfig: []gax.CallOption{},
- DeleteInstanceConfig: []gax.CallOption{},
- ListInstanceConfigOperations: []gax.CallOption{},
- ListInstances: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListInstancePartitions: []gax.CallOption{},
- GetInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- CreateInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- UpdateInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- DeleteInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- SetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- GetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.DeadlineExceeded,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- TestIamPermissions: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- GetInstancePartition: []gax.CallOption{},
- CreateInstancePartition: []gax.CallOption{},
- DeleteInstancePartition: []gax.CallOption{},
- UpdateInstancePartition: []gax.CallOption{},
- ListInstancePartitionOperations: []gax.CallOption{},
- MoveInstance: []gax.CallOption{},
- }
-}
-
-func defaultInstanceAdminRESTCallOptions() *InstanceAdminCallOptions {
- return &InstanceAdminCallOptions{
- ListInstanceConfigs: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- GetInstanceConfig: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- CreateInstanceConfig: []gax.CallOption{},
- UpdateInstanceConfig: []gax.CallOption{},
- DeleteInstanceConfig: []gax.CallOption{},
- ListInstanceConfigOperations: []gax.CallOption{},
- ListInstances: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- ListInstancePartitions: []gax.CallOption{},
- GetInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- CreateInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- UpdateInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- DeleteInstance: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- SetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- GetIamPolicy: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusGatewayTimeout)
- }),
- },
- TestIamPermissions: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- },
- GetInstancePartition: []gax.CallOption{},
- CreateInstancePartition: []gax.CallOption{},
- DeleteInstancePartition: []gax.CallOption{},
- UpdateInstancePartition: []gax.CallOption{},
- ListInstancePartitionOperations: []gax.CallOption{},
- MoveInstance: []gax.CallOption{},
- }
-}
-
-// internalInstanceAdminClient is an interface that defines the methods available from Cloud Spanner Instance Admin API.
-type internalInstanceAdminClient interface {
- Close() error
- setGoogleClientInfo(...string)
- Connection() *grpc.ClientConn
- ListInstanceConfigs(context.Context, *instancepb.ListInstanceConfigsRequest, ...gax.CallOption) *InstanceConfigIterator
- GetInstanceConfig(context.Context, *instancepb.GetInstanceConfigRequest, ...gax.CallOption) (*instancepb.InstanceConfig, error)
- CreateInstanceConfig(context.Context, *instancepb.CreateInstanceConfigRequest, ...gax.CallOption) (*CreateInstanceConfigOperation, error)
- CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation
- UpdateInstanceConfig(context.Context, *instancepb.UpdateInstanceConfigRequest, ...gax.CallOption) (*UpdateInstanceConfigOperation, error)
- UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation
- DeleteInstanceConfig(context.Context, *instancepb.DeleteInstanceConfigRequest, ...gax.CallOption) error
- ListInstanceConfigOperations(context.Context, *instancepb.ListInstanceConfigOperationsRequest, ...gax.CallOption) *OperationIterator
- ListInstances(context.Context, *instancepb.ListInstancesRequest, ...gax.CallOption) *InstanceIterator
- ListInstancePartitions(context.Context, *instancepb.ListInstancePartitionsRequest, ...gax.CallOption) *InstancePartitionIterator
- GetInstance(context.Context, *instancepb.GetInstanceRequest, ...gax.CallOption) (*instancepb.Instance, error)
- CreateInstance(context.Context, *instancepb.CreateInstanceRequest, ...gax.CallOption) (*CreateInstanceOperation, error)
- CreateInstanceOperation(name string) *CreateInstanceOperation
- UpdateInstance(context.Context, *instancepb.UpdateInstanceRequest, ...gax.CallOption) (*UpdateInstanceOperation, error)
- UpdateInstanceOperation(name string) *UpdateInstanceOperation
- DeleteInstance(context.Context, *instancepb.DeleteInstanceRequest, ...gax.CallOption) error
- SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
- TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
- GetInstancePartition(context.Context, *instancepb.GetInstancePartitionRequest, ...gax.CallOption) (*instancepb.InstancePartition, error)
- CreateInstancePartition(context.Context, *instancepb.CreateInstancePartitionRequest, ...gax.CallOption) (*CreateInstancePartitionOperation, error)
- CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation
- DeleteInstancePartition(context.Context, *instancepb.DeleteInstancePartitionRequest, ...gax.CallOption) error
- UpdateInstancePartition(context.Context, *instancepb.UpdateInstancePartitionRequest, ...gax.CallOption) (*UpdateInstancePartitionOperation, error)
- UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation
- ListInstancePartitionOperations(context.Context, *instancepb.ListInstancePartitionOperationsRequest, ...gax.CallOption) *OperationIterator
- MoveInstance(context.Context, *instancepb.MoveInstanceRequest, ...gax.CallOption) (*MoveInstanceOperation, error)
- MoveInstanceOperation(name string) *MoveInstanceOperation
-}
-
-// InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API.
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-//
-// # Cloud Spanner Instance Admin API
-//
-// The Cloud Spanner Instance Admin API can be used to create, delete,
-// modify and list instances. Instances are dedicated Cloud Spanner serving
-// and storage resources to be used by Cloud Spanner databases.
-//
-// Each instance has a “configuration”, which dictates where the
-// serving resources for the Cloud Spanner instance are located (e.g.,
-// US-central, Europe). Configurations are created by Google based on
-// resource availability.
-//
-// Cloud Spanner billing is based on the instances that exist and their
-// sizes. After an instance exists, there are no additional
-// per-database or per-operation charges for use of the instance
-// (though there may be additional network bandwidth charges).
-// Instances offer isolation: problems with databases in one instance
-// will not affect other instances. However, within an instance
-// databases can affect each other. For example, if one database in an
-// instance receives a lot of requests and consumes most of the
-// instance resources, fewer resources are available for other
-// databases in that instance, and their performance may suffer.
-type InstanceAdminClient struct {
- // The internal transport-dependent client.
- internalClient internalInstanceAdminClient
-
- // The call options for this service.
- CallOptions *InstanceAdminCallOptions
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient *lroauto.OperationsClient
-}
-
-// Wrapper methods routed to the internal client.
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *InstanceAdminClient) Close() error {
- return c.internalClient.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *InstanceAdminClient) setGoogleClientInfo(keyval ...string) {
- c.internalClient.setGoogleClientInfo(keyval...)
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *InstanceAdminClient) Connection() *grpc.ClientConn {
- return c.internalClient.Connection()
-}
-
-// ListInstanceConfigs lists the supported instance configurations for a given project.
-func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator {
- return c.internalClient.ListInstanceConfigs(ctx, req, opts...)
-}
-
-// GetInstanceConfig gets information about a particular instance configuration.
-func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- return c.internalClient.GetInstanceConfig(ctx, req, opts...)
-}
-
-// CreateInstanceConfig creates an instance configuration and begins preparing it to be used. The
-// returned [long-running operation][google.longrunning.Operation]
-// can be used to track the progress of preparing the new
-// instance configuration. The instance configuration name is assigned by the
-// caller. If the named instance configuration already exists,
-// CreateInstanceConfig returns ALREADY_EXISTS.
-//
-// Immediately after the request returns:
-//
-// The instance configuration is readable via the API, with all requested
-// attributes. The instance configuration’s
-// reconciling
-// field is set to true. Its state is CREATING.
-//
-// While the operation is pending:
-//
-// Cancelling the operation renders the instance configuration immediately
-// unreadable via the API.
-//
-// Except for deleting the creating resource, all other attempts to modify
-// the instance configuration are rejected.
-//
-// Upon completion of the returned operation:
-//
-// Instances can be created using the instance configuration.
-//
-// The instance configuration’s
-// reconciling
-// field becomes false. Its state becomes READY.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_config_name>/operations/<operation_id> and can be used to track
-// creation of the instance configuration. The
-// metadata field type is
-// CreateInstanceConfigMetadata.
-// The response field type is
-// InstanceConfig, if
-// successful.
-//
-// Authorization requires spanner.instanceConfigs.create permission on
-// the resource
-// parent.
-func (c *InstanceAdminClient) CreateInstanceConfig(ctx context.Context, req *instancepb.CreateInstanceConfigRequest, opts ...gax.CallOption) (*CreateInstanceConfigOperation, error) {
- return c.internalClient.CreateInstanceConfig(ctx, req, opts...)
-}
-
-// CreateInstanceConfigOperation returns a new CreateInstanceConfigOperation from a given name.
-// The name must be that of a previously created CreateInstanceConfigOperation, possibly from a different process.
-func (c *InstanceAdminClient) CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation {
- return c.internalClient.CreateInstanceConfigOperation(name)
-}
-
-// UpdateInstanceConfig updates an instance configuration. The returned
-// [long-running operation][google.longrunning.Operation] can be used to track
-// the progress of updating the instance. If the named instance configuration
-// does not exist, returns NOT_FOUND.
-//
-// Only user-managed configurations can be updated.
-//
-// Immediately after the request returns:
-//
-// The instance configuration’s
-// reconciling
-// field is set to true.
-//
-// While the operation is pending:
-//
-// Cancelling the operation sets its metadata’s
-// cancel_time.
-// The operation is guaranteed to succeed at undoing all changes, after
-// which point it terminates with a CANCELLED status.
-//
-// All other attempts to modify the instance configuration are rejected.
-//
-// Reading the instance configuration via the API continues to give the
-// pre-request values.
-//
-// Upon completion of the returned operation:
-//
-// Creating instances using the instance configuration uses the new
-// values.
-//
-// The new values of the instance configuration are readable via the API.
-//
-// The instance configuration’s
-// reconciling
-// field becomes false.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_config_name>/operations/<operation_id> and can be used to track
-// the instance configuration modification. The
-// metadata field type is
-// UpdateInstanceConfigMetadata.
-// The response field type is
-// InstanceConfig, if
-// successful.
-//
-// Authorization requires spanner.instanceConfigs.update permission on
-// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
-func (c *InstanceAdminClient) UpdateInstanceConfig(ctx context.Context, req *instancepb.UpdateInstanceConfigRequest, opts ...gax.CallOption) (*UpdateInstanceConfigOperation, error) {
- return c.internalClient.UpdateInstanceConfig(ctx, req, opts...)
-}
-
-// UpdateInstanceConfigOperation returns a new UpdateInstanceConfigOperation from a given name.
-// The name must be that of a previously created UpdateInstanceConfigOperation, possibly from a different process.
-func (c *InstanceAdminClient) UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation {
- return c.internalClient.UpdateInstanceConfigOperation(name)
-}
-
-// DeleteInstanceConfig deletes the instance configuration. Deletion is only allowed when no
-// instances are using the configuration. If any instances are using
-// the configuration, returns FAILED_PRECONDITION.
-//
-// Only user-managed configurations can be deleted.
-//
-// Authorization requires spanner.instanceConfigs.delete permission on
-// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
-func (c *InstanceAdminClient) DeleteInstanceConfig(ctx context.Context, req *instancepb.DeleteInstanceConfigRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteInstanceConfig(ctx, req, opts...)
-}
-
-// ListInstanceConfigOperations lists the user-managed instance configuration [long-running
-// operations][google.longrunning.Operation] in the given project. An instance
-// configuration operation has a name of the form
-// projects/<project>/instanceConfigs/<instance_config>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations. Operations returned are ordered by
-// operation.metadata.value.start_time in descending order starting
-// from the most recently started operation.
-func (c *InstanceAdminClient) ListInstanceConfigOperations(ctx context.Context, req *instancepb.ListInstanceConfigOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- return c.internalClient.ListInstanceConfigOperations(ctx, req, opts...)
-}
-
-// ListInstances lists all instances in the given project.
-func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
- return c.internalClient.ListInstances(ctx, req, opts...)
-}
-
-// ListInstancePartitions lists all instance partitions for the given instance.
-func (c *InstanceAdminClient) ListInstancePartitions(ctx context.Context, req *instancepb.ListInstancePartitionsRequest, opts ...gax.CallOption) *InstancePartitionIterator {
- return c.internalClient.ListInstancePartitions(ctx, req, opts...)
-}
-
-// GetInstance gets information about a particular instance.
-func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) {
- return c.internalClient.GetInstance(ctx, req, opts...)
-}
-
-// CreateInstance creates an instance and begins preparing it to begin serving. The
-// returned [long-running operation][google.longrunning.Operation]
-// can be used to track the progress of preparing the new
-// instance. The instance name is assigned by the caller. If the
-// named instance already exists, CreateInstance returns
-// ALREADY_EXISTS.
-//
-// Immediately upon completion of this request:
-//
-// The instance is readable via the API, with all requested attributes
-// but no allocated resources. Its state is CREATING.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation renders the instance immediately unreadable
-// via the API.
-//
-// The instance can be deleted.
-//
-// All other attempts to modify the instance are rejected.
-//
-// Upon completion of the returned operation:
-//
-// Billing for all successfully-allocated resources begins (some types
-// may have lower than the requested levels).
-//
-// Databases can be created in the instance.
-//
-// The instance’s allocated resource levels are readable via the API.
-//
-// The instance’s state becomes READY.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format <instance_name>/operations/<operation_id> and
-// can be used to track creation of the instance. The
-// metadata field type is
-// CreateInstanceMetadata.
-// The response field type is
-// Instance, if successful.
-func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
- return c.internalClient.CreateInstance(ctx, req, opts...)
-}
-
-// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
-// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
-func (c *InstanceAdminClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
- return c.internalClient.CreateInstanceOperation(name)
-}
-
-// UpdateInstance updates an instance, and begins allocating or releasing resources
-// as requested. The returned [long-running
-// operation][google.longrunning.Operation] can be used to track the
-// progress of updating the instance. If the named instance does not
-// exist, returns NOT_FOUND.
-//
-// Immediately upon completion of this request:
-//
-// For resource types for which a decrease in the instance’s allocation
-// has been requested, billing is based on the newly-requested level.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation sets its metadata’s
-// cancel_time,
-// and begins restoring resources to their pre-request values. The
-// operation is guaranteed to succeed at undoing all resource changes,
-// after which point it terminates with a CANCELLED status.
-//
-// All other attempts to modify the instance are rejected.
-//
-// Reading the instance via the API continues to give the pre-request
-// resource levels.
-//
-// Upon completion of the returned operation:
-//
-// Billing begins for all successfully-allocated resources (some types
-// may have lower than the requested levels).
-//
-// All newly-reserved resources are available for serving the instance’s
-// tables.
-//
-// The instance’s new resource levels are readable via the API.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format <instance_name>/operations/<operation_id> and
-// can be used to track the instance modification. The
-// metadata field type is
-// UpdateInstanceMetadata.
-// The response field type is
-// Instance, if successful.
-//
-// Authorization requires spanner.instances.update permission on
-// the resource [name][google.spanner.admin.instance.v1.Instance.name (at http://google.spanner.admin.instance.v1.Instance.name)].
-func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
- return c.internalClient.UpdateInstance(ctx, req, opts...)
-}
-
-// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
-// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
-func (c *InstanceAdminClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
- return c.internalClient.UpdateInstanceOperation(name)
-}
-
-// DeleteInstance deletes an instance.
-//
-// Immediately upon completion of the request:
-//
-// Billing ceases for all of the instance’s reserved resources.
-//
-// Soon afterward:
-//
-// The instance and all of its databases immediately and
-// irrevocably disappear from the API. All data in the databases
-// is permanently deleted.
-func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteInstance(ctx, req, opts...)
-}
-
-// SetIamPolicy sets the access control policy on an instance resource. Replaces any
-// existing policy.
-//
-// Authorization requires spanner.instances.setIamPolicy on
-// resource.
-func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.SetIamPolicy(ctx, req, opts...)
-}
-
-// GetIamPolicy gets the access control policy for an instance resource. Returns an empty
-// policy if an instance exists but does not have a policy set.
-//
-// Authorization requires spanner.instances.getIamPolicy on
-// resource.
-func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- return c.internalClient.GetIamPolicy(ctx, req, opts...)
-}
-
-// TestIamPermissions returns permissions that the caller has on the specified instance resource.
-//
-// Attempting this RPC on a non-existent Cloud Spanner instance resource will
-// result in a NOT_FOUND error if the user has spanner.instances.list
-// permission on the containing Google Cloud Project. Otherwise returns an
-// empty set of permissions.
-func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- return c.internalClient.TestIamPermissions(ctx, req, opts...)
-}
-
-// GetInstancePartition gets information about a particular instance partition.
-func (c *InstanceAdminClient) GetInstancePartition(ctx context.Context, req *instancepb.GetInstancePartitionRequest, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- return c.internalClient.GetInstancePartition(ctx, req, opts...)
-}
-
-// CreateInstancePartition creates an instance partition and begins preparing it to be used. The
-// returned [long-running operation][google.longrunning.Operation]
-// can be used to track the progress of preparing the new instance partition.
-// The instance partition name is assigned by the caller. If the named
-// instance partition already exists, CreateInstancePartition returns
-// ALREADY_EXISTS.
-//
-// Immediately upon completion of this request:
-//
-// The instance partition is readable via the API, with all requested
-// attributes but no allocated resources. Its state is CREATING.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation renders the instance partition immediately
-// unreadable via the API.
-//
-// The instance partition can be deleted.
-//
-// All other attempts to modify the instance partition are rejected.
-//
-// Upon completion of the returned operation:
-//
-// Billing for all successfully-allocated resources begins (some types
-// may have lower than the requested levels).
-//
-// Databases can start using this instance partition.
-//
-// The instance partition’s allocated resource levels are readable via the
-// API.
-//
-// The instance partition’s state becomes READY.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_partition_name>/operations/<operation_id> and can be used to
-// track creation of the instance partition. The
-// metadata field type is
-// CreateInstancePartitionMetadata.
-// The response field type is
-// InstancePartition, if
-// successful.
-func (c *InstanceAdminClient) CreateInstancePartition(ctx context.Context, req *instancepb.CreateInstancePartitionRequest, opts ...gax.CallOption) (*CreateInstancePartitionOperation, error) {
- return c.internalClient.CreateInstancePartition(ctx, req, opts...)
-}
-
-// CreateInstancePartitionOperation returns a new CreateInstancePartitionOperation from a given name.
-// The name must be that of a previously created CreateInstancePartitionOperation, possibly from a different process.
-func (c *InstanceAdminClient) CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation {
- return c.internalClient.CreateInstancePartitionOperation(name)
-}
-
-// DeleteInstancePartition deletes an existing instance partition. Requires that the
-// instance partition is not used by any database or backup and is not the
-// default instance partition of an instance.
-//
-// Authorization requires spanner.instancePartitions.delete permission on
-// the resource
-// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
-func (c *InstanceAdminClient) DeleteInstancePartition(ctx context.Context, req *instancepb.DeleteInstancePartitionRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteInstancePartition(ctx, req, opts...)
-}
-
-// UpdateInstancePartition updates an instance partition, and begins allocating or releasing resources
-// as requested. The returned [long-running
-// operation][google.longrunning.Operation] can be used to track the
-// progress of updating the instance partition. If the named instance
-// partition does not exist, returns NOT_FOUND.
-//
-// Immediately upon completion of this request:
-//
-// For resource types for which a decrease in the instance partition’s
-// allocation has been requested, billing is based on the newly-requested
-// level.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation sets its metadata’s
-// cancel_time,
-// and begins restoring resources to their pre-request values. The
-// operation is guaranteed to succeed at undoing all resource changes,
-// after which point it terminates with a CANCELLED status.
-//
-// All other attempts to modify the instance partition are rejected.
-//
-// Reading the instance partition via the API continues to give the
-// pre-request resource levels.
-//
-// Upon completion of the returned operation:
-//
-// Billing begins for all successfully-allocated resources (some types
-// may have lower than the requested levels).
-//
-// All newly-reserved resources are available for serving the instance
-// partition’s tables.
-//
-// The instance partition’s new resource levels are readable via the API.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_partition_name>/operations/<operation_id> and can be used to
-// track the instance partition modification. The
-// metadata field type is
-// UpdateInstancePartitionMetadata.
-// The response field type is
-// InstancePartition, if
-// successful.
-//
-// Authorization requires spanner.instancePartitions.update permission on
-// the resource
-// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
-func (c *InstanceAdminClient) UpdateInstancePartition(ctx context.Context, req *instancepb.UpdateInstancePartitionRequest, opts ...gax.CallOption) (*UpdateInstancePartitionOperation, error) {
- return c.internalClient.UpdateInstancePartition(ctx, req, opts...)
-}
-
-// UpdateInstancePartitionOperation returns a new UpdateInstancePartitionOperation from a given name.
-// The name must be that of a previously created UpdateInstancePartitionOperation, possibly from a different process.
-func (c *InstanceAdminClient) UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation {
- return c.internalClient.UpdateInstancePartitionOperation(name)
-}
-
-// ListInstancePartitionOperations lists instance partition [long-running
-// operations][google.longrunning.Operation] in the given instance.
-// An instance partition operation has a name of the form
-// projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations. Operations returned are ordered by
-// operation.metadata.value.start_time in descending order starting from the
-// most recently started operation.
-//
-// Authorization requires spanner.instancePartitionOperations.list
-// permission on the resource
-// parent.
-func (c *InstanceAdminClient) ListInstancePartitionOperations(ctx context.Context, req *instancepb.ListInstancePartitionOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- return c.internalClient.ListInstancePartitionOperations(ctx, req, opts...)
-}
-
-// MoveInstance moves an instance to the target instance configuration. You can use the
-// returned [long-running operation][google.longrunning.Operation] to track
-// the progress of moving the instance.
-//
-// MoveInstance returns FAILED_PRECONDITION if the instance meets any of
-// the following criteria:
-//
-// Is undergoing a move to a different instance configuration
-//
-// Has backups
-//
-// Has an ongoing update
-//
-// Contains any CMEK-enabled databases
-//
-// Is a free trial instance
-//
-// While the operation is pending:
-//
-// All other attempts to modify the instance, including changes to its
-// compute capacity, are rejected.
-//
-// The following database and backup admin operations are rejected:
-//
-// DatabaseAdmin.CreateDatabase
-//
-// DatabaseAdmin.UpdateDatabaseDdl (disabled if default_leader is
-// specified in the request.)
-//
-// DatabaseAdmin.RestoreDatabase
-//
-// DatabaseAdmin.CreateBackup
-//
-// DatabaseAdmin.CopyBackup
-//
-// Both the source and target instance configurations are subject to
-// hourly compute and storage charges.
-//
-// The instance might experience higher read-write latencies and a higher
-// transaction abort rate. However, moving an instance doesn’t cause any
-// downtime.
-//
-// The returned [long-running operation][google.longrunning.Operation] has
-// a name of the format
-// <instance_name>/operations/<operation_id> and can be used to track
-// the move instance operation. The
-// metadata field type is
-// MoveInstanceMetadata.
-// The response field type is
-// Instance,
-// if successful.
-// Cancelling the operation sets its metadata’s
-// cancel_time.
-// Cancellation is not immediate because it involves moving any data
-// previously moved to the target instance configuration back to the original
-// instance configuration. You can use this operation to track the progress of
-// the cancellation. Upon successful completion of the cancellation, the
-// operation terminates with CANCELLED status.
-//
-// If not cancelled, upon completion of the returned operation:
-//
-// The instance successfully moves to the target instance
-// configuration.
-//
-// You are billed for compute and storage in target instance
-// configuration.
-//
-// Authorization requires the spanner.instances.update permission on
-// the resource instance.
-//
-// For more details, see
-// Move an instance (at https://cloud.google.com/spanner/docs/move-instance).
-func (c *InstanceAdminClient) MoveInstance(ctx context.Context, req *instancepb.MoveInstanceRequest, opts ...gax.CallOption) (*MoveInstanceOperation, error) {
- return c.internalClient.MoveInstance(ctx, req, opts...)
-}
-
-// MoveInstanceOperation returns a new MoveInstanceOperation from a given name.
-// The name must be that of a previously created MoveInstanceOperation, possibly from a different process.
-func (c *InstanceAdminClient) MoveInstanceOperation(name string) *MoveInstanceOperation {
- return c.internalClient.MoveInstanceOperation(name)
-}
-
-// instanceAdminGRPCClient is a client for interacting with Cloud Spanner Instance Admin API over gRPC transport.
-//
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type instanceAdminGRPCClient struct {
- // Connection pool of gRPC connections to the service.
- connPool gtransport.ConnPool
-
- // Points back to the CallOptions field of the containing InstanceAdminClient
- CallOptions **InstanceAdminCallOptions
-
- // The gRPC API client.
- instanceAdminClient instancepb.InstanceAdminClient
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient **lroauto.OperationsClient
-
- // The x-goog-* metadata to be sent with each request.
- xGoogHeaders []string
-}
-
-// NewInstanceAdminClient creates a new instance admin client based on gRPC.
-// The returned client must be Closed when it is done being used to clean up its underlying connections.
-//
-// # Cloud Spanner Instance Admin API
-//
-// The Cloud Spanner Instance Admin API can be used to create, delete,
-// modify and list instances. Instances are dedicated Cloud Spanner serving
-// and storage resources to be used by Cloud Spanner databases.
-//
-// Each instance has a “configuration”, which dictates where the
-// serving resources for the Cloud Spanner instance are located (e.g.,
-// US-central, Europe). Configurations are created by Google based on
-// resource availability.
-//
-// Cloud Spanner billing is based on the instances that exist and their
-// sizes. After an instance exists, there are no additional
-// per-database or per-operation charges for use of the instance
-// (though there may be additional network bandwidth charges).
-// Instances offer isolation: problems with databases in one instance
-// will not affect other instances. However, within an instance
-// databases can affect each other. For example, if one database in an
-// instance receives a lot of requests and consumes most of the
-// instance resources, fewer resources are available for other
-// databases in that instance, and their performance may suffer.
-func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) {
- clientOpts := defaultInstanceAdminGRPCClientOptions()
- if newInstanceAdminClientHook != nil {
- hookOpts, err := newInstanceAdminClientHook(ctx, clientHookParams{})
- if err != nil {
- return nil, err
- }
- clientOpts = append(clientOpts, hookOpts...)
- }
-
- connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
- if err != nil {
- return nil, err
- }
- client := InstanceAdminClient{CallOptions: defaultInstanceAdminCallOptions()}
-
- c := &instanceAdminGRPCClient{
- connPool: connPool,
- instanceAdminClient: instancepb.NewInstanceAdminClient(connPool),
- CallOptions: &client.CallOptions,
- }
- c.setGoogleClientInfo()
-
- client.internalClient = c
-
- client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool))
- if err != nil {
- // This error "should not happen", since we are just reusing old connection pool
- // and never actually need to dial.
- // If this does happen, we could leak connp. However, we cannot close conn:
- // If the user invoked the constructor with option.WithGRPCConn,
- // we would close a connection that's still in use.
- // TODO: investigate error conditions.
- return nil, err
- }
- c.LROClient = &client.LROClient
- return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *instanceAdminGRPCClient) Connection() *grpc.ClientConn {
- return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *instanceAdminGRPCClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *instanceAdminGRPCClient) Close() error {
- return c.connPool.Close()
-}
-
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type instanceAdminRESTClient struct {
- // The http endpoint to connect to.
- endpoint string
-
- // The http client.
- httpClient *http.Client
-
- // LROClient is used internally to handle long-running operations.
- // It is exposed so that its CallOptions can be modified if required.
- // Users should not Close this client.
- LROClient **lroauto.OperationsClient
-
- // The x-goog-* headers to be sent with each request.
- xGoogHeaders []string
-
- // Points back to the CallOptions field of the containing InstanceAdminClient
- CallOptions **InstanceAdminCallOptions
-}
-
-// NewInstanceAdminRESTClient creates a new instance admin rest client.
-//
-// # Cloud Spanner Instance Admin API
-//
-// The Cloud Spanner Instance Admin API can be used to create, delete,
-// modify and list instances. Instances are dedicated Cloud Spanner serving
-// and storage resources to be used by Cloud Spanner databases.
-//
-// Each instance has a “configuration”, which dictates where the
-// serving resources for the Cloud Spanner instance are located (e.g.,
-// US-central, Europe). Configurations are created by Google based on
-// resource availability.
-//
-// Cloud Spanner billing is based on the instances that exist and their
-// sizes. After an instance exists, there are no additional
-// per-database or per-operation charges for use of the instance
-// (though there may be additional network bandwidth charges).
-// Instances offer isolation: problems with databases in one instance
-// will not affect other instances. However, within an instance
-// databases can affect each other. For example, if one database in an
-// instance receives a lot of requests and consumes most of the
-// instance resources, fewer resources are available for other
-// databases in that instance, and their performance may suffer.
-func NewInstanceAdminRESTClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) {
- clientOpts := append(defaultInstanceAdminRESTClientOptions(), opts...)
- httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
- if err != nil {
- return nil, err
- }
-
- callOpts := defaultInstanceAdminRESTCallOptions()
- c := &instanceAdminRESTClient{
- endpoint: endpoint,
- httpClient: httpClient,
- CallOptions: &callOpts,
- }
- c.setGoogleClientInfo()
-
- lroOpts := []option.ClientOption{
- option.WithHTTPClient(httpClient),
- option.WithEndpoint(endpoint),
- }
- opClient, err := lroauto.NewOperationsRESTClient(ctx, lroOpts...)
- if err != nil {
- return nil, err
- }
- c.LROClient = &opClient
-
- return &InstanceAdminClient{internalClient: c, CallOptions: callOpts}, nil
-}
-
-func defaultInstanceAdminRESTClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("https://spanner.googleapis.com"),
- internaloption.WithDefaultEndpointTemplate("https://spanner.UNIVERSE_DOMAIN"),
- internaloption.WithDefaultMTLSEndpoint("https://spanner.mtls.googleapis.com"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- }
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *instanceAdminRESTClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *instanceAdminRESTClient) Close() error {
- // Replace httpClient with nil to force cleanup.
- c.httpClient = nil
- return nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: This method always returns nil.
-func (c *instanceAdminRESTClient) Connection() *grpc.ClientConn {
- return nil
-}
-func (c *instanceAdminGRPCClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListInstanceConfigs[0:len((*c.CallOptions).ListInstanceConfigs):len((*c.CallOptions).ListInstanceConfigs)], opts...)
- it := &InstanceConfigIterator{}
- req = proto.Clone(req).(*instancepb.ListInstanceConfigsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) {
- resp := &instancepb.ListInstanceConfigsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetInstanceConfigs(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *instanceAdminGRPCClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetInstanceConfig[0:len((*c.CallOptions).GetInstanceConfig):len((*c.CallOptions).GetInstanceConfig)], opts...)
- var resp *instancepb.InstanceConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *instanceAdminGRPCClient) CreateInstanceConfig(ctx context.Context, req *instancepb.CreateInstanceConfigRequest, opts ...gax.CallOption) (*CreateInstanceConfigOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateInstanceConfig[0:len((*c.CallOptions).CreateInstanceConfig):len((*c.CallOptions).CreateInstanceConfig)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.CreateInstanceConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CreateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *instanceAdminGRPCClient) UpdateInstanceConfig(ctx context.Context, req *instancepb.UpdateInstanceConfigRequest, opts ...gax.CallOption) (*UpdateInstanceConfigOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_config.name", url.QueryEscape(req.GetInstanceConfig().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateInstanceConfig[0:len((*c.CallOptions).UpdateInstanceConfig):len((*c.CallOptions).UpdateInstanceConfig)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.UpdateInstanceConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &UpdateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *instanceAdminGRPCClient) DeleteInstanceConfig(ctx context.Context, req *instancepb.DeleteInstanceConfigRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteInstanceConfig[0:len((*c.CallOptions).DeleteInstanceConfig):len((*c.CallOptions).DeleteInstanceConfig)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.instanceAdminClient.DeleteInstanceConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *instanceAdminGRPCClient) ListInstanceConfigOperations(ctx context.Context, req *instancepb.ListInstanceConfigOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListInstanceConfigOperations[0:len((*c.CallOptions).ListInstanceConfigOperations):len((*c.CallOptions).ListInstanceConfigOperations)], opts...)
- it := &OperationIterator{}
- req = proto.Clone(req).(*instancepb.ListInstanceConfigOperationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &instancepb.ListInstanceConfigOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.ListInstanceConfigOperations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *instanceAdminGRPCClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListInstances[0:len((*c.CallOptions).ListInstances):len((*c.CallOptions).ListInstances)], opts...)
- it := &InstanceIterator{}
- req = proto.Clone(req).(*instancepb.ListInstancesRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) {
- resp := &instancepb.ListInstancesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.ListInstances(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetInstances(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *instanceAdminGRPCClient) ListInstancePartitions(ctx context.Context, req *instancepb.ListInstancePartitionsRequest, opts ...gax.CallOption) *InstancePartitionIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListInstancePartitions[0:len((*c.CallOptions).ListInstancePartitions):len((*c.CallOptions).ListInstancePartitions)], opts...)
- it := &InstancePartitionIterator{}
- req = proto.Clone(req).(*instancepb.ListInstancePartitionsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstancePartition, string, error) {
- resp := &instancepb.ListInstancePartitionsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.ListInstancePartitions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetInstancePartitions(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *instanceAdminGRPCClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetInstance[0:len((*c.CallOptions).GetInstance):len((*c.CallOptions).GetInstance)], opts...)
- var resp *instancepb.Instance
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.GetInstance(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *instanceAdminGRPCClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateInstance[0:len((*c.CallOptions).CreateInstance):len((*c.CallOptions).CreateInstance)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.CreateInstance(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CreateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *instanceAdminGRPCClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance.name", url.QueryEscape(req.GetInstance().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateInstance[0:len((*c.CallOptions).UpdateInstance):len((*c.CallOptions).UpdateInstance)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.UpdateInstance(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &UpdateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *instanceAdminGRPCClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteInstance[0:len((*c.CallOptions).DeleteInstance):len((*c.CallOptions).DeleteInstance)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.instanceAdminClient.DeleteInstance(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *instanceAdminGRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *instanceAdminGRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- var resp *iampb.Policy
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *instanceAdminGRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- var resp *iampb.TestIamPermissionsResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *instanceAdminGRPCClient) GetInstancePartition(ctx context.Context, req *instancepb.GetInstancePartitionRequest, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetInstancePartition[0:len((*c.CallOptions).GetInstancePartition):len((*c.CallOptions).GetInstancePartition)], opts...)
- var resp *instancepb.InstancePartition
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.GetInstancePartition(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *instanceAdminGRPCClient) CreateInstancePartition(ctx context.Context, req *instancepb.CreateInstancePartitionRequest, opts ...gax.CallOption) (*CreateInstancePartitionOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateInstancePartition[0:len((*c.CallOptions).CreateInstancePartition):len((*c.CallOptions).CreateInstancePartition)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.CreateInstancePartition(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &CreateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *instanceAdminGRPCClient) DeleteInstancePartition(ctx context.Context, req *instancepb.DeleteInstancePartitionRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteInstancePartition[0:len((*c.CallOptions).DeleteInstancePartition):len((*c.CallOptions).DeleteInstancePartition)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.instanceAdminClient.DeleteInstancePartition(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *instanceAdminGRPCClient) UpdateInstancePartition(ctx context.Context, req *instancepb.UpdateInstancePartitionRequest, opts ...gax.CallOption) (*UpdateInstancePartitionOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_partition.name", url.QueryEscape(req.GetInstancePartition().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateInstancePartition[0:len((*c.CallOptions).UpdateInstancePartition):len((*c.CallOptions).UpdateInstancePartition)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.UpdateInstancePartition(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &UpdateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-func (c *instanceAdminGRPCClient) ListInstancePartitionOperations(ctx context.Context, req *instancepb.ListInstancePartitionOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListInstancePartitionOperations[0:len((*c.CallOptions).ListInstancePartitionOperations):len((*c.CallOptions).ListInstancePartitionOperations)], opts...)
- it := &OperationIterator{}
- req = proto.Clone(req).(*instancepb.ListInstancePartitionOperationsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &instancepb.ListInstancePartitionOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.ListInstancePartitionOperations(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *instanceAdminGRPCClient) MoveInstance(ctx context.Context, req *instancepb.MoveInstanceRequest, opts ...gax.CallOption) (*MoveInstanceOperation, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).MoveInstance[0:len((*c.CallOptions).MoveInstance):len((*c.CallOptions).MoveInstance)], opts...)
- var resp *longrunningpb.Operation
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.instanceAdminClient.MoveInstance(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return &MoveInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- }, nil
-}
-
-// ListInstanceConfigs lists the supported instance configurations for a given project.
-func (c *instanceAdminRESTClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest, opts ...gax.CallOption) *InstanceConfigIterator {
- it := &InstanceConfigIterator{}
- req = proto.Clone(req).(*instancepb.ListInstanceConfigsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) {
- resp := &instancepb.ListInstanceConfigsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instanceConfigs", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetInstanceConfigs(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// GetInstanceConfig gets information about a particular instance configuration.
-func (c *instanceAdminRESTClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest, opts ...gax.CallOption) (*instancepb.InstanceConfig, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetInstanceConfig[0:len((*c.CallOptions).GetInstanceConfig):len((*c.CallOptions).GetInstanceConfig)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &instancepb.InstanceConfig{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// CreateInstanceConfig creates an instance configuration and begins preparing it to be used. The
-// returned [long-running operation][google.longrunning.Operation]
-// can be used to track the progress of preparing the new
-// instance configuration. The instance configuration name is assigned by the
-// caller. If the named instance configuration already exists,
-// CreateInstanceConfig returns ALREADY_EXISTS.
-//
-// Immediately after the request returns:
-//
-// The instance configuration is readable via the API, with all requested
-// attributes. The instance configuration’s
-// reconciling
-// field is set to true. Its state is CREATING.
-//
-// While the operation is pending:
-//
-// Cancelling the operation renders the instance configuration immediately
-// unreadable via the API.
-//
-// Except for deleting the creating resource, all other attempts to modify
-// the instance configuration are rejected.
-//
-// Upon completion of the returned operation:
-//
-// Instances can be created using the instance configuration.
-//
-// The instance configuration’s
-// reconciling
-// field becomes false. Its state becomes READY.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_config_name>/operations/<operation_id> and can be used to track
-// creation of the instance configuration. The
-// metadata field type is
-// CreateInstanceConfigMetadata.
-// The response field type is
-// InstanceConfig, if
-// successful.
-//
-// Authorization requires spanner.instanceConfigs.create permission on
-// the resource
-// parent.
-func (c *instanceAdminRESTClient) CreateInstanceConfig(ctx context.Context, req *instancepb.CreateInstanceConfigRequest, opts ...gax.CallOption) (*CreateInstanceConfigOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instanceConfigs", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CreateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// UpdateInstanceConfig updates an instance configuration. The returned
-// [long-running operation][google.longrunning.Operation] can be used to track
-// the progress of updating the instance. If the named instance configuration
-// does not exist, returns NOT_FOUND.
-//
-// Only user-managed configurations can be updated.
-//
-// Immediately after the request returns:
-//
-// The instance configuration’s
-// reconciling
-// field is set to true.
-//
-// While the operation is pending:
-//
-// Cancelling the operation sets its metadata’s
-// cancel_time.
-// The operation is guaranteed to succeed at undoing all changes, after
-// which point it terminates with a CANCELLED status.
-//
-// All other attempts to modify the instance configuration are rejected.
-//
-// Reading the instance configuration via the API continues to give the
-// pre-request values.
-//
-// Upon completion of the returned operation:
-//
-// Creating instances using the instance configuration uses the new
-// values.
-//
-// The new values of the instance configuration are readable via the API.
-//
-// The instance configuration’s
-// reconciling
-// field becomes false.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_config_name>/operations/<operation_id> and can be used to track
-// the instance configuration modification. The
-// metadata field type is
-// UpdateInstanceConfigMetadata.
-// The response field type is
-// InstanceConfig, if
-// successful.
-//
-// Authorization requires spanner.instanceConfigs.update permission on
-// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
-func (c *instanceAdminRESTClient) UpdateInstanceConfig(ctx context.Context, req *instancepb.UpdateInstanceConfigRequest, opts ...gax.CallOption) (*UpdateInstanceConfigOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetInstanceConfig().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_config.name", url.QueryEscape(req.GetInstanceConfig().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &UpdateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// DeleteInstanceConfig deletes the instance configuration. Deletion is only allowed when no
-// instances are using the configuration. If any instances are using
-// the configuration, returns FAILED_PRECONDITION.
-//
-// Only user-managed configurations can be deleted.
-//
-// Authorization requires spanner.instanceConfigs.delete permission on
-// the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name (at http://google.spanner.admin.instance.v1.InstanceConfig.name)].
-func (c *instanceAdminRESTClient) DeleteInstanceConfig(ctx context.Context, req *instancepb.DeleteInstanceConfigRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetEtag() != "" {
- params.Add("etag", fmt.Sprintf("%v", req.GetEtag()))
- }
- if req.GetValidateOnly() {
- params.Add("validateOnly", fmt.Sprintf("%v", req.GetValidateOnly()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// ListInstanceConfigOperations lists the user-managed instance configuration [long-running
-// operations][google.longrunning.Operation] in the given project. An instance
-// configuration operation has a name of the form
-// projects/<project>/instanceConfigs/<instance_config>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations. Operations returned are ordered by
-// operation.metadata.value.start_time in descending order starting
-// from the most recently started operation.
-func (c *instanceAdminRESTClient) ListInstanceConfigOperations(ctx context.Context, req *instancepb.ListInstanceConfigOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- it := &OperationIterator{}
- req = proto.Clone(req).(*instancepb.ListInstanceConfigOperationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &instancepb.ListInstanceConfigOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instanceConfigOperations", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// ListInstances lists all instances in the given project.
-func (c *instanceAdminRESTClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest, opts ...gax.CallOption) *InstanceIterator {
- it := &InstanceIterator{}
- req = proto.Clone(req).(*instancepb.ListInstancesRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) {
- resp := &instancepb.ListInstancesResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instances", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetInstanceDeadline() != nil {
- instanceDeadline, err := protojson.Marshal(req.GetInstanceDeadline())
- if err != nil {
- return nil, "", err
- }
- params.Add("instanceDeadline", string(instanceDeadline[1:len(instanceDeadline)-1]))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetInstances(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// ListInstancePartitions lists all instance partitions for the given instance.
-func (c *instanceAdminRESTClient) ListInstancePartitions(ctx context.Context, req *instancepb.ListInstancePartitionsRequest, opts ...gax.CallOption) *InstancePartitionIterator {
- it := &InstancePartitionIterator{}
- req = proto.Clone(req).(*instancepb.ListInstancePartitionsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstancePartition, string, error) {
- resp := &instancepb.ListInstancePartitionsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instancePartitions", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetInstancePartitionDeadline() != nil {
- instancePartitionDeadline, err := protojson.Marshal(req.GetInstancePartitionDeadline())
- if err != nil {
- return nil, "", err
- }
- params.Add("instancePartitionDeadline", string(instancePartitionDeadline[1:len(instancePartitionDeadline)-1]))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetInstancePartitions(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// GetInstance gets information about a particular instance.
-func (c *instanceAdminRESTClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest, opts ...gax.CallOption) (*instancepb.Instance, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFieldMask() != nil {
- fieldMask, err := protojson.Marshal(req.GetFieldMask())
- if err != nil {
- return nil, err
- }
- params.Add("fieldMask", string(fieldMask[1:len(fieldMask)-1]))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetInstance[0:len((*c.CallOptions).GetInstance):len((*c.CallOptions).GetInstance)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &instancepb.Instance{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// CreateInstance creates an instance and begins preparing it to begin serving. The
-// returned [long-running operation][google.longrunning.Operation]
-// can be used to track the progress of preparing the new
-// instance. The instance name is assigned by the caller. If the
-// named instance already exists, CreateInstance returns
-// ALREADY_EXISTS.
-//
-// Immediately upon completion of this request:
-//
-// The instance is readable via the API, with all requested attributes
-// but no allocated resources. Its state is CREATING.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation renders the instance immediately unreadable
-// via the API.
-//
-// The instance can be deleted.
-//
-// All other attempts to modify the instance are rejected.
-//
-// Upon completion of the returned operation:
-//
-// Billing for all successfully-allocated resources begins (some types
-// may have lower than the requested levels).
-//
-// Databases can be created in the instance.
-//
-// The instance’s allocated resource levels are readable via the API.
-//
-// The instance’s state becomes READY.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format <instance_name>/operations/<operation_id> and
-// can be used to track creation of the instance. The
-// metadata field type is
-// CreateInstanceMetadata.
-// The response field type is
-// Instance, if successful.
-func (c *instanceAdminRESTClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest, opts ...gax.CallOption) (*CreateInstanceOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instances", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CreateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// UpdateInstance updates an instance, and begins allocating or releasing resources
-// as requested. The returned [long-running
-// operation][google.longrunning.Operation] can be used to track the
-// progress of updating the instance. If the named instance does not
-// exist, returns NOT_FOUND.
-//
-// Immediately upon completion of this request:
-//
-// For resource types for which a decrease in the instance’s allocation
-// has been requested, billing is based on the newly-requested level.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation sets its metadata’s
-// cancel_time,
-// and begins restoring resources to their pre-request values. The
-// operation is guaranteed to succeed at undoing all resource changes,
-// after which point it terminates with a CANCELLED status.
-//
-// All other attempts to modify the instance are rejected.
-//
-// Reading the instance via the API continues to give the pre-request
-// resource levels.
-//
-// Upon completion of the returned operation:
-//
-// Billing begins for all successfully-allocated resources (some types
-// may have lower than the requested levels).
-//
-// All newly-reserved resources are available for serving the instance’s
-// tables.
-//
-// The instance’s new resource levels are readable via the API.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format <instance_name>/operations/<operation_id> and
-// can be used to track the instance modification. The
-// metadata field type is
-// UpdateInstanceMetadata.
-// The response field type is
-// Instance, if successful.
-//
-// Authorization requires spanner.instances.update permission on
-// the resource [name][google.spanner.admin.instance.v1.Instance.name (at http://google.spanner.admin.instance.v1.Instance.name)].
-func (c *instanceAdminRESTClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest, opts ...gax.CallOption) (*UpdateInstanceOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetInstance().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance.name", url.QueryEscape(req.GetInstance().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &UpdateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// DeleteInstance deletes an instance.
-//
-// Immediately upon completion of the request:
-//
-// Billing ceases for all of the instance’s reserved resources.
-//
-// Soon afterward:
-//
-// The instance and all of its databases immediately and
-// irrevocably disappear from the API. All data in the databases
-// is permanently deleted.
-func (c *instanceAdminRESTClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// SetIamPolicy sets the access control policy on an instance resource. Replaces any
-// existing policy.
-//
-// Authorization requires spanner.instances.setIamPolicy on
-// resource.
-func (c *instanceAdminRESTClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:setIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetIamPolicy gets the access control policy for an instance resource. Returns an empty
-// policy if an instance exists but does not have a policy set.
-//
-// Authorization requires spanner.instances.getIamPolicy on
-// resource.
-func (c *instanceAdminRESTClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:getIamPolicy", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.Policy{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// TestIamPermissions returns permissions that the caller has on the specified instance resource.
-//
-// Attempting this RPC on a non-existent Cloud Spanner instance resource will
-// result in a NOT_FOUND error if the user has spanner.instances.list
-// permission on the containing Google Cloud Project. Otherwise returns an
-// empty set of permissions.
-func (c *instanceAdminRESTClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:testIamPermissions", req.GetResource())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "resource", url.QueryEscape(req.GetResource()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &iampb.TestIamPermissionsResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetInstancePartition gets information about a particular instance partition.
-func (c *instanceAdminRESTClient) GetInstancePartition(ctx context.Context, req *instancepb.GetInstancePartitionRequest, opts ...gax.CallOption) (*instancepb.InstancePartition, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetInstancePartition[0:len((*c.CallOptions).GetInstancePartition):len((*c.CallOptions).GetInstancePartition)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &instancepb.InstancePartition{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// CreateInstancePartition creates an instance partition and begins preparing it to be used. The
-// returned [long-running operation][google.longrunning.Operation]
-// can be used to track the progress of preparing the new instance partition.
-// The instance partition name is assigned by the caller. If the named
-// instance partition already exists, CreateInstancePartition returns
-// ALREADY_EXISTS.
-//
-// Immediately upon completion of this request:
-//
-// The instance partition is readable via the API, with all requested
-// attributes but no allocated resources. Its state is CREATING.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation renders the instance partition immediately
-// unreadable via the API.
-//
-// The instance partition can be deleted.
-//
-// All other attempts to modify the instance partition are rejected.
-//
-// Upon completion of the returned operation:
-//
-// Billing for all successfully-allocated resources begins (some types
-// may have lower than the requested levels).
-//
-// Databases can start using this instance partition.
-//
-// The instance partition’s allocated resource levels are readable via the
-// API.
-//
-// The instance partition’s state becomes READY.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_partition_name>/operations/<operation_id> and can be used to
-// track creation of the instance partition. The
-// metadata field type is
-// CreateInstancePartitionMetadata.
-// The response field type is
-// InstancePartition, if
-// successful.
-func (c *instanceAdminRESTClient) CreateInstancePartition(ctx context.Context, req *instancepb.CreateInstancePartitionRequest, opts ...gax.CallOption) (*CreateInstancePartitionOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instancePartitions", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &CreateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// DeleteInstancePartition deletes an existing instance partition. Requires that the
-// instance partition is not used by any database or backup and is not the
-// default instance partition of an instance.
-//
-// Authorization requires spanner.instancePartitions.delete permission on
-// the resource
-// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
-func (c *instanceAdminRESTClient) DeleteInstancePartition(ctx context.Context, req *instancepb.DeleteInstancePartitionRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetEtag() != "" {
- params.Add("etag", fmt.Sprintf("%v", req.GetEtag()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// UpdateInstancePartition updates an instance partition, and begins allocating or releasing resources
-// as requested. The returned [long-running
-// operation][google.longrunning.Operation] can be used to track the
-// progress of updating the instance partition. If the named instance
-// partition does not exist, returns NOT_FOUND.
-//
-// Immediately upon completion of this request:
-//
-// For resource types for which a decrease in the instance partition’s
-// allocation has been requested, billing is based on the newly-requested
-// level.
-//
-// Until completion of the returned operation:
-//
-// Cancelling the operation sets its metadata’s
-// cancel_time,
-// and begins restoring resources to their pre-request values. The
-// operation is guaranteed to succeed at undoing all resource changes,
-// after which point it terminates with a CANCELLED status.
-//
-// All other attempts to modify the instance partition are rejected.
-//
-// Reading the instance partition via the API continues to give the
-// pre-request resource levels.
-//
-// Upon completion of the returned operation:
-//
-// Billing begins for all successfully-allocated resources (some types
-// may have lower than the requested levels).
-//
-// All newly-reserved resources are available for serving the instance
-// partition’s tables.
-//
-// The instance partition’s new resource levels are readable via the API.
-//
-// The returned [long-running operation][google.longrunning.Operation] will
-// have a name of the format
-// <instance_partition_name>/operations/<operation_id> and can be used to
-// track the instance partition modification. The
-// metadata field type is
-// UpdateInstancePartitionMetadata.
-// The response field type is
-// InstancePartition, if
-// successful.
-//
-// Authorization requires spanner.instancePartitions.update permission on
-// the resource
-// [name][google.spanner.admin.instance.v1.InstancePartition.name (at http://google.spanner.admin.instance.v1.InstancePartition.name)].
-func (c *instanceAdminRESTClient) UpdateInstancePartition(ctx context.Context, req *instancepb.UpdateInstancePartitionRequest, opts ...gax.CallOption) (*UpdateInstancePartitionOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetInstancePartition().GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "instance_partition.name", url.QueryEscape(req.GetInstancePartition().GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("PATCH", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &UpdateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// ListInstancePartitionOperations lists instance partition [long-running
-// operations][google.longrunning.Operation] in the given instance.
-// An instance partition operation has a name of the form
-// projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>.
-// The long-running operation
-// metadata field type
-// metadata.type_url describes the type of the metadata. Operations returned
-// include those that have completed/failed/canceled within the last 7 days,
-// and pending operations. Operations returned are ordered by
-// operation.metadata.value.start_time in descending order starting from the
-// most recently started operation.
-//
-// Authorization requires spanner.instancePartitionOperations.list
-// permission on the resource
-// parent.
-func (c *instanceAdminRESTClient) ListInstancePartitionOperations(ctx context.Context, req *instancepb.ListInstancePartitionOperationsRequest, opts ...gax.CallOption) *OperationIterator {
- it := &OperationIterator{}
- req = proto.Clone(req).(*instancepb.ListInstancePartitionOperationsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) {
- resp := &instancepb.ListInstancePartitionOperationsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/instancePartitionOperations", req.GetParent())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetInstancePartitionDeadline() != nil {
- instancePartitionDeadline, err := protojson.Marshal(req.GetInstancePartitionDeadline())
- if err != nil {
- return nil, "", err
- }
- params.Add("instancePartitionDeadline", string(instancePartitionDeadline[1:len(instancePartitionDeadline)-1]))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetOperations(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// MoveInstance moves an instance to the target instance configuration. You can use the
-// returned [long-running operation][google.longrunning.Operation] to track
-// the progress of moving the instance.
-//
-// MoveInstance returns FAILED_PRECONDITION if the instance meets any of
-// the following criteria:
-//
-// Is undergoing a move to a different instance configuration
-//
-// Has backups
-//
-// Has an ongoing update
-//
-// Contains any CMEK-enabled databases
-//
-// Is a free trial instance
-//
-// While the operation is pending:
-//
-// All other attempts to modify the instance, including changes to its
-// compute capacity, are rejected.
-//
-// The following database and backup admin operations are rejected:
-//
-// DatabaseAdmin.CreateDatabase
-//
-// DatabaseAdmin.UpdateDatabaseDdl (disabled if default_leader is
-// specified in the request.)
-//
-// DatabaseAdmin.RestoreDatabase
-//
-// DatabaseAdmin.CreateBackup
-//
-// DatabaseAdmin.CopyBackup
-//
-// Both the source and target instance configurations are subject to
-// hourly compute and storage charges.
-//
-// The instance might experience higher read-write latencies and a higher
-// transaction abort rate. However, moving an instance doesn’t cause any
-// downtime.
-//
-// The returned [long-running operation][google.longrunning.Operation] has
-// a name of the format
-// <instance_name>/operations/<operation_id> and can be used to track
-// the move instance operation. The
-// metadata field type is
-// MoveInstanceMetadata.
-// The response field type is
-// Instance,
-// if successful.
-// Cancelling the operation sets its metadata’s
-// cancel_time.
-// Cancellation is not immediate because it involves moving any data
-// previously moved to the target instance configuration back to the original
-// instance configuration. You can use this operation to track the progress of
-// the cancellation. Upon successful completion of the cancellation, the
-// operation terminates with CANCELLED status.
-//
-// If not cancelled, upon completion of the returned operation:
-//
-// The instance successfully moves to the target instance
-// configuration.
-//
-// You are billed for compute and storage in target instance
-// configuration.
-//
-// Authorization requires the spanner.instances.update permission on
-// the resource instance.
-//
-// For more details, see
-// Move an instance (at https://cloud.google.com/spanner/docs/move-instance).
-func (c *instanceAdminRESTClient) MoveInstance(ctx context.Context, req *instancepb.MoveInstanceRequest, opts ...gax.CallOption) (*MoveInstanceOperation, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:move", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &longrunningpb.Operation{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
-
- override := fmt.Sprintf("/v1/%s", resp.GetName())
- return &MoveInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, resp),
- pollPath: override,
- }, nil
-}
-
-// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
-// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
- return &CreateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CreateInstanceOperation returns a new CreateInstanceOperation from a given name.
-// The name must be that of a previously created CreateInstanceOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) CreateInstanceOperation(name string) *CreateInstanceOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CreateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// CreateInstanceConfigOperation returns a new CreateInstanceConfigOperation from a given name.
-// The name must be that of a previously created CreateInstanceConfigOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation {
- return &CreateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CreateInstanceConfigOperation returns a new CreateInstanceConfigOperation from a given name.
-// The name must be that of a previously created CreateInstanceConfigOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) CreateInstanceConfigOperation(name string) *CreateInstanceConfigOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CreateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// CreateInstancePartitionOperation returns a new CreateInstancePartitionOperation from a given name.
-// The name must be that of a previously created CreateInstancePartitionOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation {
- return &CreateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// CreateInstancePartitionOperation returns a new CreateInstancePartitionOperation from a given name.
-// The name must be that of a previously created CreateInstancePartitionOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) CreateInstancePartitionOperation(name string) *CreateInstancePartitionOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &CreateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// MoveInstanceOperation returns a new MoveInstanceOperation from a given name.
-// The name must be that of a previously created MoveInstanceOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) MoveInstanceOperation(name string) *MoveInstanceOperation {
- return &MoveInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// MoveInstanceOperation returns a new MoveInstanceOperation from a given name.
-// The name must be that of a previously created MoveInstanceOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) MoveInstanceOperation(name string) *MoveInstanceOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &MoveInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
-// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
- return &UpdateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// UpdateInstanceOperation returns a new UpdateInstanceOperation from a given name.
-// The name must be that of a previously created UpdateInstanceOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) UpdateInstanceOperation(name string) *UpdateInstanceOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &UpdateInstanceOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// UpdateInstanceConfigOperation returns a new UpdateInstanceConfigOperation from a given name.
-// The name must be that of a previously created UpdateInstanceConfigOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation {
- return &UpdateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// UpdateInstanceConfigOperation returns a new UpdateInstanceConfigOperation from a given name.
-// The name must be that of a previously created UpdateInstanceConfigOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) UpdateInstanceConfigOperation(name string) *UpdateInstanceConfigOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &UpdateInstanceConfigOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
-
-// UpdateInstancePartitionOperation returns a new UpdateInstancePartitionOperation from a given name.
-// The name must be that of a previously created UpdateInstancePartitionOperation, possibly from a different process.
-func (c *instanceAdminGRPCClient) UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation {
- return &UpdateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- }
-}
-
-// UpdateInstancePartitionOperation returns a new UpdateInstancePartitionOperation from a given name.
-// The name must be that of a previously created UpdateInstancePartitionOperation, possibly from a different process.
-func (c *instanceAdminRESTClient) UpdateInstancePartitionOperation(name string) *UpdateInstancePartitionOperation {
- override := fmt.Sprintf("/v1/%s", name)
- return &UpdateInstancePartitionOperation{
- lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}),
- pollPath: override,
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go
deleted file mode 100644
index 0e529b45c..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/common.pb.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/admin/instance/v1/common.proto
-
-package instancepb
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Indicates the expected fulfillment period of an operation.
-type FulfillmentPeriod int32
-
-const (
- // Not specified.
- FulfillmentPeriod_FULFILLMENT_PERIOD_UNSPECIFIED FulfillmentPeriod = 0
- // Normal fulfillment period. The operation is expected to complete within
- // minutes.
- FulfillmentPeriod_FULFILLMENT_PERIOD_NORMAL FulfillmentPeriod = 1
- // Extended fulfillment period. It can take up to an hour for the operation
- // to complete.
- FulfillmentPeriod_FULFILLMENT_PERIOD_EXTENDED FulfillmentPeriod = 2
-)
-
-// Enum value maps for FulfillmentPeriod.
-var (
- FulfillmentPeriod_name = map[int32]string{
- 0: "FULFILLMENT_PERIOD_UNSPECIFIED",
- 1: "FULFILLMENT_PERIOD_NORMAL",
- 2: "FULFILLMENT_PERIOD_EXTENDED",
- }
- FulfillmentPeriod_value = map[string]int32{
- "FULFILLMENT_PERIOD_UNSPECIFIED": 0,
- "FULFILLMENT_PERIOD_NORMAL": 1,
- "FULFILLMENT_PERIOD_EXTENDED": 2,
- }
-)
-
-func (x FulfillmentPeriod) Enum() *FulfillmentPeriod {
- p := new(FulfillmentPeriod)
- *p = x
- return p
-}
-
-func (x FulfillmentPeriod) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (FulfillmentPeriod) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_common_proto_enumTypes[0].Descriptor()
-}
-
-func (FulfillmentPeriod) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_common_proto_enumTypes[0]
-}
-
-func (x FulfillmentPeriod) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use FulfillmentPeriod.Descriptor instead.
-func (FulfillmentPeriod) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_common_proto_rawDescGZIP(), []int{0}
-}
-
-// Encapsulates progress related information for a Cloud Spanner long
-// running instance operations.
-type OperationProgress struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Percent completion of the operation.
- // Values are between 0 and 100 inclusive.
- ProgressPercent int32 `protobuf:"varint,1,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
- // Time the request was received.
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // If set, the time at which this operation failed or was completed
- // successfully.
- EndTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
-}
-
-func (x *OperationProgress) Reset() {
- *x = OperationProgress{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_common_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OperationProgress) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OperationProgress) ProtoMessage() {}
-
-func (x *OperationProgress) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_common_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OperationProgress.ProtoReflect.Descriptor instead.
-func (*OperationProgress) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_common_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *OperationProgress) GetProgressPercent() int32 {
- if x != nil {
- return x.ProgressPercent
- }
- return 0
-}
-
-func (x *OperationProgress) GetStartTime() *timestamppb.Timestamp {
- if x != nil {
- return x.StartTime
- }
- return nil
-}
-
-func (x *OperationProgress) GetEndTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EndTime
- }
- return nil
-}
-
-var File_google_spanner_admin_instance_v1_common_proto protoreflect.FileDescriptor
-
-var file_google_spanner_admin_instance_v1_common_proto_rawDesc = []byte{
- 0x0a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2f,
- 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x22, 0xb0, 0x01, 0x0a, 0x11, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x67,
- 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, 0x65, 0x72, 0x63,
- 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35,
- 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e,
- 0x64, 0x54, 0x69, 0x6d, 0x65, 0x2a, 0x77, 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c,
- 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x46, 0x55,
- 0x4c, 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45, 0x52, 0x49, 0x4f, 0x44,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d,
- 0x0a, 0x19, 0x46, 0x55, 0x4c, 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45,
- 0x52, 0x49, 0x4f, 0x44, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x1f, 0x0a,
- 0x1b, 0x46, 0x55, 0x4c, 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45, 0x52,
- 0x49, 0x4f, 0x44, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x42, 0xfd,
- 0x01, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x70, 0x62, 0x3b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x70, 0x62, 0xaa, 0x02,
- 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x49, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41,
- 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5c, 0x56, 0x31,
- 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e,
- 0x3a, 0x3a, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_admin_instance_v1_common_proto_rawDescOnce sync.Once
- file_google_spanner_admin_instance_v1_common_proto_rawDescData = file_google_spanner_admin_instance_v1_common_proto_rawDesc
-)
-
-func file_google_spanner_admin_instance_v1_common_proto_rawDescGZIP() []byte {
- file_google_spanner_admin_instance_v1_common_proto_rawDescOnce.Do(func() {
- file_google_spanner_admin_instance_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_instance_v1_common_proto_rawDescData)
- })
- return file_google_spanner_admin_instance_v1_common_proto_rawDescData
-}
-
-var file_google_spanner_admin_instance_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_google_spanner_admin_instance_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_spanner_admin_instance_v1_common_proto_goTypes = []any{
- (FulfillmentPeriod)(0), // 0: google.spanner.admin.instance.v1.FulfillmentPeriod
- (*OperationProgress)(nil), // 1: google.spanner.admin.instance.v1.OperationProgress
- (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
-}
-var file_google_spanner_admin_instance_v1_common_proto_depIdxs = []int32{
- 2, // 0: google.spanner.admin.instance.v1.OperationProgress.start_time:type_name -> google.protobuf.Timestamp
- 2, // 1: google.spanner.admin.instance.v1.OperationProgress.end_time:type_name -> google.protobuf.Timestamp
- 2, // [2:2] is the sub-list for method output_type
- 2, // [2:2] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_admin_instance_v1_common_proto_init() }
-func file_google_spanner_admin_instance_v1_common_proto_init() {
- if File_google_spanner_admin_instance_v1_common_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_admin_instance_v1_common_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*OperationProgress); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_admin_instance_v1_common_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_admin_instance_v1_common_proto_goTypes,
- DependencyIndexes: file_google_spanner_admin_instance_v1_common_proto_depIdxs,
- EnumInfos: file_google_spanner_admin_instance_v1_common_proto_enumTypes,
- MessageInfos: file_google_spanner_admin_instance_v1_common_proto_msgTypes,
- }.Build()
- File_google_spanner_admin_instance_v1_common_proto = out.File
- file_google_spanner_admin_instance_v1_common_proto_rawDesc = nil
- file_google_spanner_admin_instance_v1_common_proto_goTypes = nil
- file_google_spanner_admin_instance_v1_common_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go
deleted file mode 100644
index 10b80281b..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instancepb/spanner_instance_admin.pb.go
+++ /dev/null
@@ -1,6928 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/admin/instance/v1/spanner_instance_admin.proto
-
-package instancepb
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- iampb "cloud.google.com/go/iam/apiv1/iampb"
- longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Indicates the type of replica. See the [replica types
-// documentation](https://cloud.google.com/spanner/docs/replication#replica_types)
-// for more details.
-type ReplicaInfo_ReplicaType int32
-
-const (
- // Not specified.
- ReplicaInfo_TYPE_UNSPECIFIED ReplicaInfo_ReplicaType = 0
- // Read-write replicas support both reads and writes. These replicas:
- //
- // * Maintain a full copy of your data.
- // * Serve reads.
- // * Can vote whether to commit a write.
- // * Participate in leadership election.
- // * Are eligible to become a leader.
- ReplicaInfo_READ_WRITE ReplicaInfo_ReplicaType = 1
- // Read-only replicas only support reads (not writes). Read-only replicas:
- //
- // * Maintain a full copy of your data.
- // * Serve reads.
- // * Do not participate in voting to commit writes.
- // * Are not eligible to become a leader.
- ReplicaInfo_READ_ONLY ReplicaInfo_ReplicaType = 2
- // Witness replicas don't support reads but do participate in voting to
- // commit writes. Witness replicas:
- //
- // * Do not maintain a full copy of data.
- // * Do not serve reads.
- // * Vote whether to commit writes.
- // * Participate in leader election but are not eligible to become leader.
- ReplicaInfo_WITNESS ReplicaInfo_ReplicaType = 3
-)
-
-// Enum value maps for ReplicaInfo_ReplicaType.
-var (
- ReplicaInfo_ReplicaType_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "READ_WRITE",
- 2: "READ_ONLY",
- 3: "WITNESS",
- }
- ReplicaInfo_ReplicaType_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "READ_WRITE": 1,
- "READ_ONLY": 2,
- "WITNESS": 3,
- }
-)
-
-func (x ReplicaInfo_ReplicaType) Enum() *ReplicaInfo_ReplicaType {
- p := new(ReplicaInfo_ReplicaType)
- *p = x
- return p
-}
-
-func (x ReplicaInfo_ReplicaType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ReplicaInfo_ReplicaType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[0].Descriptor()
-}
-
-func (ReplicaInfo_ReplicaType) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[0]
-}
-
-func (x ReplicaInfo_ReplicaType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ReplicaInfo_ReplicaType.Descriptor instead.
-func (ReplicaInfo_ReplicaType) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// The type of this configuration.
-type InstanceConfig_Type int32
-
-const (
- // Unspecified.
- InstanceConfig_TYPE_UNSPECIFIED InstanceConfig_Type = 0
- // Google managed configuration.
- InstanceConfig_GOOGLE_MANAGED InstanceConfig_Type = 1
- // User managed configuration.
- InstanceConfig_USER_MANAGED InstanceConfig_Type = 2
-)
-
-// Enum value maps for InstanceConfig_Type.
-var (
- InstanceConfig_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "GOOGLE_MANAGED",
- 2: "USER_MANAGED",
- }
- InstanceConfig_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "GOOGLE_MANAGED": 1,
- "USER_MANAGED": 2,
- }
-)
-
-func (x InstanceConfig_Type) Enum() *InstanceConfig_Type {
- p := new(InstanceConfig_Type)
- *p = x
- return p
-}
-
-func (x InstanceConfig_Type) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (InstanceConfig_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[1].Descriptor()
-}
-
-func (InstanceConfig_Type) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[1]
-}
-
-func (x InstanceConfig_Type) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use InstanceConfig_Type.Descriptor instead.
-func (InstanceConfig_Type) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 0}
-}
-
-// Indicates the current state of the instance configuration.
-type InstanceConfig_State int32
-
-const (
- // Not specified.
- InstanceConfig_STATE_UNSPECIFIED InstanceConfig_State = 0
- // The instance configuration is still being created.
- InstanceConfig_CREATING InstanceConfig_State = 1
- // The instance configuration is fully created and ready to be used to
- // create instances.
- InstanceConfig_READY InstanceConfig_State = 2
-)
-
-// Enum value maps for InstanceConfig_State.
-var (
- InstanceConfig_State_name = map[int32]string{
- 0: "STATE_UNSPECIFIED",
- 1: "CREATING",
- 2: "READY",
- }
- InstanceConfig_State_value = map[string]int32{
- "STATE_UNSPECIFIED": 0,
- "CREATING": 1,
- "READY": 2,
- }
-)
-
-func (x InstanceConfig_State) Enum() *InstanceConfig_State {
- p := new(InstanceConfig_State)
- *p = x
- return p
-}
-
-func (x InstanceConfig_State) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (InstanceConfig_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[2].Descriptor()
-}
-
-func (InstanceConfig_State) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[2]
-}
-
-func (x InstanceConfig_State) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use InstanceConfig_State.Descriptor instead.
-func (InstanceConfig_State) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 1}
-}
-
-// Indicates the current state of the instance.
-type Instance_State int32
-
-const (
- // Not specified.
- Instance_STATE_UNSPECIFIED Instance_State = 0
- // The instance is still being created. Resources may not be
- // available yet, and operations such as database creation may not
- // work.
- Instance_CREATING Instance_State = 1
- // The instance is fully created and ready to do work such as
- // creating databases.
- Instance_READY Instance_State = 2
-)
-
-// Enum value maps for Instance_State.
-var (
- Instance_State_name = map[int32]string{
- 0: "STATE_UNSPECIFIED",
- 1: "CREATING",
- 2: "READY",
- }
- Instance_State_value = map[string]int32{
- "STATE_UNSPECIFIED": 0,
- "CREATING": 1,
- "READY": 2,
- }
-)
-
-func (x Instance_State) Enum() *Instance_State {
- p := new(Instance_State)
- *p = x
- return p
-}
-
-func (x Instance_State) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Instance_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[3].Descriptor()
-}
-
-func (Instance_State) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[3]
-}
-
-func (x Instance_State) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Instance_State.Descriptor instead.
-func (Instance_State) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{3, 0}
-}
-
-// The edition selected for this instance. Different editions provide
-// different capabilities at different price points.
-type Instance_Edition int32
-
-const (
- // Edition not specified.
- Instance_EDITION_UNSPECIFIED Instance_Edition = 0
- // Standard edition.
- Instance_STANDARD Instance_Edition = 1
- // Enterprise edition.
- Instance_ENTERPRISE Instance_Edition = 2
- // Enterprise Plus edition.
- Instance_ENTERPRISE_PLUS Instance_Edition = 3
-)
-
-// Enum value maps for Instance_Edition.
-var (
- Instance_Edition_name = map[int32]string{
- 0: "EDITION_UNSPECIFIED",
- 1: "STANDARD",
- 2: "ENTERPRISE",
- 3: "ENTERPRISE_PLUS",
- }
- Instance_Edition_value = map[string]int32{
- "EDITION_UNSPECIFIED": 0,
- "STANDARD": 1,
- "ENTERPRISE": 2,
- "ENTERPRISE_PLUS": 3,
- }
-)
-
-func (x Instance_Edition) Enum() *Instance_Edition {
- p := new(Instance_Edition)
- *p = x
- return p
-}
-
-func (x Instance_Edition) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Instance_Edition) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[4].Descriptor()
-}
-
-func (Instance_Edition) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[4]
-}
-
-func (x Instance_Edition) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Instance_Edition.Descriptor instead.
-func (Instance_Edition) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{3, 1}
-}
-
-// Indicates the current state of the instance partition.
-type InstancePartition_State int32
-
-const (
- // Not specified.
- InstancePartition_STATE_UNSPECIFIED InstancePartition_State = 0
- // The instance partition is still being created. Resources may not be
- // available yet, and operations such as creating placements using this
- // instance partition may not work.
- InstancePartition_CREATING InstancePartition_State = 1
- // The instance partition is fully created and ready to do work such as
- // creating placements and using in databases.
- InstancePartition_READY InstancePartition_State = 2
-)
-
-// Enum value maps for InstancePartition_State.
-var (
- InstancePartition_State_name = map[int32]string{
- 0: "STATE_UNSPECIFIED",
- 1: "CREATING",
- 2: "READY",
- }
- InstancePartition_State_value = map[string]int32{
- "STATE_UNSPECIFIED": 0,
- "CREATING": 1,
- "READY": 2,
- }
-)
-
-func (x InstancePartition_State) Enum() *InstancePartition_State {
- p := new(InstancePartition_State)
- *p = x
- return p
-}
-
-func (x InstancePartition_State) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (InstancePartition_State) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[5].Descriptor()
-}
-
-func (InstancePartition_State) Type() protoreflect.EnumType {
- return &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes[5]
-}
-
-func (x InstancePartition_State) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use InstancePartition_State.Descriptor instead.
-func (InstancePartition_State) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{22, 0}
-}
-
-type ReplicaInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The location of the serving resources, e.g. "us-central1".
- Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
- // The type of replica.
- Type ReplicaInfo_ReplicaType `protobuf:"varint,2,opt,name=type,proto3,enum=google.spanner.admin.instance.v1.ReplicaInfo_ReplicaType" json:"type,omitempty"`
- // If true, this location is designated as the default leader location where
- // leader replicas are placed. See the [region types
- // documentation](https://cloud.google.com/spanner/docs/instances#region_types)
- // for more details.
- DefaultLeaderLocation bool `protobuf:"varint,3,opt,name=default_leader_location,json=defaultLeaderLocation,proto3" json:"default_leader_location,omitempty"`
-}
-
-func (x *ReplicaInfo) Reset() {
- *x = ReplicaInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReplicaInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReplicaInfo) ProtoMessage() {}
-
-func (x *ReplicaInfo) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReplicaInfo.ProtoReflect.Descriptor instead.
-func (*ReplicaInfo) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *ReplicaInfo) GetLocation() string {
- if x != nil {
- return x.Location
- }
- return ""
-}
-
-func (x *ReplicaInfo) GetType() ReplicaInfo_ReplicaType {
- if x != nil {
- return x.Type
- }
- return ReplicaInfo_TYPE_UNSPECIFIED
-}
-
-func (x *ReplicaInfo) GetDefaultLeaderLocation() bool {
- if x != nil {
- return x.DefaultLeaderLocation
- }
- return false
-}
-
-// A possible configuration for a Cloud Spanner instance. Configurations
-// define the geographic placement of nodes and their replication.
-type InstanceConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A unique identifier for the instance configuration. Values
- // are of the form
- // `projects/<project>/instanceConfigs/[a-z][-a-z0-9]*`.
- //
- // User instance configuration must start with `custom-`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The name of this instance configuration as it appears in UIs.
- DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- // Output only. Whether this instance configuration is a Google-managed or
- // user-managed configuration.
- ConfigType InstanceConfig_Type `protobuf:"varint,5,opt,name=config_type,json=configType,proto3,enum=google.spanner.admin.instance.v1.InstanceConfig_Type" json:"config_type,omitempty"`
- // The geographic placement of nodes in this instance configuration and their
- // replication properties.
- Replicas []*ReplicaInfo `protobuf:"bytes,3,rep,name=replicas,proto3" json:"replicas,omitempty"`
- // Output only. The available optional replicas to choose from for user
- // managed configurations. Populated for Google managed configurations.
- OptionalReplicas []*ReplicaInfo `protobuf:"bytes,6,rep,name=optional_replicas,json=optionalReplicas,proto3" json:"optional_replicas,omitempty"`
- // Base configuration name, e.g. projects/<project_name>/instanceConfigs/nam3,
- // based on which this configuration is created. Only set for user managed
- // configurations. `base_config` must refer to a configuration of type
- // GOOGLE_MANAGED in the same project as this configuration.
- BaseConfig string `protobuf:"bytes,7,opt,name=base_config,json=baseConfig,proto3" json:"base_config,omitempty"`
- // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
- // resources into groups that reflect a customer's organizational needs and
- // deployment strategies. Cloud Labels can be used to filter collections of
- // resources. They can be used to control how resource metrics are aggregated.
- // And they can be used as arguments to policy management rules (e.g. route,
- // firewall, load balancing, etc.).
- //
- // - Label keys must be between 1 and 63 characters long and must conform to
- // the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
- // - Label values must be between 0 and 63 characters long and must conform
- // to the regular expression `[a-z0-9_-]{0,63}`.
- // - No more than 64 labels can be associated with a given resource.
- //
- // See https://goo.gl/xmQnxf for more information on and examples of labels.
- //
- // If you plan to use labels in your own code, please note that additional
- // characters may be allowed in the future. Therefore, you are advised to use
- // an internal label representation, such as JSON, which doesn't rely upon
- // specific characters being disallowed. For example, representing labels
- // as the string: name + "_" + value would prove problematic if we were to
- // allow "_" in a future release.
- Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // etag is used for optimistic concurrency control as a way
- // to help prevent simultaneous updates of a instance configuration from
- // overwriting each other. It is strongly suggested that systems make use of
- // the etag in the read-modify-write cycle to perform instance configuration
- // updates in order to avoid race conditions: An etag is returned in the
- // response which contains instance configurations, and systems are expected
- // to put that etag in the request to update instance configuration to ensure
- // that their change is applied to the same version of the instance
- // configuration. If no etag is provided in the call to update the instance
- // configuration, then the existing instance configuration is overwritten
- // blindly.
- Etag string `protobuf:"bytes,9,opt,name=etag,proto3" json:"etag,omitempty"`
- // Allowed values of the "default_leader" schema option for databases in
- // instances that use this instance configuration.
- LeaderOptions []string `protobuf:"bytes,4,rep,name=leader_options,json=leaderOptions,proto3" json:"leader_options,omitempty"`
- // Output only. If true, the instance configuration is being created or
- // updated. If false, there are no ongoing operations for the instance
- // configuration.
- Reconciling bool `protobuf:"varint,10,opt,name=reconciling,proto3" json:"reconciling,omitempty"`
- // Output only. The current instance configuration state. Applicable only for
- // `USER_MANAGED` configurations.
- State InstanceConfig_State `protobuf:"varint,11,opt,name=state,proto3,enum=google.spanner.admin.instance.v1.InstanceConfig_State" json:"state,omitempty"`
-}
-
-func (x *InstanceConfig) Reset() {
- *x = InstanceConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *InstanceConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*InstanceConfig) ProtoMessage() {}
-
-func (x *InstanceConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use InstanceConfig.ProtoReflect.Descriptor instead.
-func (*InstanceConfig) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *InstanceConfig) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *InstanceConfig) GetDisplayName() string {
- if x != nil {
- return x.DisplayName
- }
- return ""
-}
-
-func (x *InstanceConfig) GetConfigType() InstanceConfig_Type {
- if x != nil {
- return x.ConfigType
- }
- return InstanceConfig_TYPE_UNSPECIFIED
-}
-
-func (x *InstanceConfig) GetReplicas() []*ReplicaInfo {
- if x != nil {
- return x.Replicas
- }
- return nil
-}
-
-func (x *InstanceConfig) GetOptionalReplicas() []*ReplicaInfo {
- if x != nil {
- return x.OptionalReplicas
- }
- return nil
-}
-
-func (x *InstanceConfig) GetBaseConfig() string {
- if x != nil {
- return x.BaseConfig
- }
- return ""
-}
-
-func (x *InstanceConfig) GetLabels() map[string]string {
- if x != nil {
- return x.Labels
- }
- return nil
-}
-
-func (x *InstanceConfig) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-func (x *InstanceConfig) GetLeaderOptions() []string {
- if x != nil {
- return x.LeaderOptions
- }
- return nil
-}
-
-func (x *InstanceConfig) GetReconciling() bool {
- if x != nil {
- return x.Reconciling
- }
- return false
-}
-
-func (x *InstanceConfig) GetState() InstanceConfig_State {
- if x != nil {
- return x.State
- }
- return InstanceConfig_STATE_UNSPECIFIED
-}
-
-// Autoscaling configuration for an instance.
-type AutoscalingConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Autoscaling limits for an instance.
- AutoscalingLimits *AutoscalingConfig_AutoscalingLimits `protobuf:"bytes,1,opt,name=autoscaling_limits,json=autoscalingLimits,proto3" json:"autoscaling_limits,omitempty"`
- // Required. The autoscaling targets for an instance.
- AutoscalingTargets *AutoscalingConfig_AutoscalingTargets `protobuf:"bytes,2,opt,name=autoscaling_targets,json=autoscalingTargets,proto3" json:"autoscaling_targets,omitempty"`
-}
-
-func (x *AutoscalingConfig) Reset() {
- *x = AutoscalingConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AutoscalingConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AutoscalingConfig) ProtoMessage() {}
-
-func (x *AutoscalingConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AutoscalingConfig.ProtoReflect.Descriptor instead.
-func (*AutoscalingConfig) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *AutoscalingConfig) GetAutoscalingLimits() *AutoscalingConfig_AutoscalingLimits {
- if x != nil {
- return x.AutoscalingLimits
- }
- return nil
-}
-
-func (x *AutoscalingConfig) GetAutoscalingTargets() *AutoscalingConfig_AutoscalingTargets {
- if x != nil {
- return x.AutoscalingTargets
- }
- return nil
-}
-
-// An isolated set of Cloud Spanner resources on which databases can be hosted.
-type Instance struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. A unique identifier for the instance, which cannot be changed
- // after the instance is created. Values are of the form
- // `projects/<project>/instances/[a-z][-a-z0-9]*[a-z0-9]`. The final
- // segment of the name must be between 2 and 64 characters in length.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The name of the instance's configuration. Values are of the form
- // `projects/<project>/instanceConfigs/<configuration>`. See
- // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
- // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
- Config string `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- // Required. The descriptive name for this instance as it appears in UIs.
- // Must be unique per project and between 4 and 30 characters in length.
- DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- // The number of nodes allocated to this instance. At most one of either
- // node_count or processing_units should be present in the message.
- //
- // Users can set the node_count field to specify the target number of nodes
- // allocated to the instance.
- //
- // This may be zero in API responses for instances that are not yet in state
- // `READY`.
- //
- // See [the
- // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
- // for more information about nodes and processing units.
- NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3" json:"node_count,omitempty"`
- // The number of processing units allocated to this instance. At most one of
- // processing_units or node_count should be present in the message.
- //
- // Users can set the processing_units field to specify the target number of
- // processing units allocated to the instance.
- //
- // This may be zero in API responses for instances that are not yet in state
- // `READY`.
- //
- // See [the
- // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
- // for more information about nodes and processing units.
- ProcessingUnits int32 `protobuf:"varint,9,opt,name=processing_units,json=processingUnits,proto3" json:"processing_units,omitempty"`
- // Optional. The autoscaling configuration. Autoscaling is enabled if this
- // field is set. When autoscaling is enabled, node_count and processing_units
- // are treated as OUTPUT_ONLY fields and reflect the current compute capacity
- // allocated to the instance.
- AutoscalingConfig *AutoscalingConfig `protobuf:"bytes,17,opt,name=autoscaling_config,json=autoscalingConfig,proto3" json:"autoscaling_config,omitempty"`
- // Output only. The current instance state. For
- // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance],
- // the state must be either omitted or set to `CREATING`. For
- // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance],
- // the state must be either omitted or set to `READY`.
- State Instance_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.spanner.admin.instance.v1.Instance_State" json:"state,omitempty"`
- // Cloud Labels are a flexible and lightweight mechanism for organizing cloud
- // resources into groups that reflect a customer's organizational needs and
- // deployment strategies. Cloud Labels can be used to filter collections of
- // resources. They can be used to control how resource metrics are aggregated.
- // And they can be used as arguments to policy management rules (e.g. route,
- // firewall, load balancing, etc.).
- //
- // - Label keys must be between 1 and 63 characters long and must conform to
- // the following regular expression: `[a-z][a-z0-9_-]{0,62}`.
- // - Label values must be between 0 and 63 characters long and must conform
- // to the regular expression `[a-z0-9_-]{0,63}`.
- // - No more than 64 labels can be associated with a given resource.
- //
- // See https://goo.gl/xmQnxf for more information on and examples of labels.
- //
- // If you plan to use labels in your own code, please note that additional
- // characters may be allowed in the future. And so you are advised to use an
- // internal label representation, such as JSON, which doesn't rely upon
- // specific characters being disallowed. For example, representing labels
- // as the string: name + "_" + value would prove problematic if we were to
- // allow "_" in a future release.
- Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // Deprecated. This field is not populated.
- EndpointUris []string `protobuf:"bytes,8,rep,name=endpoint_uris,json=endpointUris,proto3" json:"endpoint_uris,omitempty"`
- // Output only. The time at which the instance was created.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The time at which the instance was most recently updated.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // Optional. The `Edition` of the current instance.
- Edition Instance_Edition `protobuf:"varint,20,opt,name=edition,proto3,enum=google.spanner.admin.instance.v1.Instance_Edition" json:"edition,omitempty"`
-}
-
-func (x *Instance) Reset() {
- *x = Instance{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Instance) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Instance) ProtoMessage() {}
-
-func (x *Instance) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Instance.ProtoReflect.Descriptor instead.
-func (*Instance) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Instance) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Instance) GetConfig() string {
- if x != nil {
- return x.Config
- }
- return ""
-}
-
-func (x *Instance) GetDisplayName() string {
- if x != nil {
- return x.DisplayName
- }
- return ""
-}
-
-func (x *Instance) GetNodeCount() int32 {
- if x != nil {
- return x.NodeCount
- }
- return 0
-}
-
-func (x *Instance) GetProcessingUnits() int32 {
- if x != nil {
- return x.ProcessingUnits
- }
- return 0
-}
-
-func (x *Instance) GetAutoscalingConfig() *AutoscalingConfig {
- if x != nil {
- return x.AutoscalingConfig
- }
- return nil
-}
-
-func (x *Instance) GetState() Instance_State {
- if x != nil {
- return x.State
- }
- return Instance_STATE_UNSPECIFIED
-}
-
-func (x *Instance) GetLabels() map[string]string {
- if x != nil {
- return x.Labels
- }
- return nil
-}
-
-func (x *Instance) GetEndpointUris() []string {
- if x != nil {
- return x.EndpointUris
- }
- return nil
-}
-
-func (x *Instance) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *Instance) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-func (x *Instance) GetEdition() Instance_Edition {
- if x != nil {
- return x.Edition
- }
- return Instance_EDITION_UNSPECIFIED
-}
-
-// The request for
-// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
-type ListInstanceConfigsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the project for which a list of supported instance
- // configurations is requested. Values are of the form
- // `projects/<project>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Number of instance configurations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigsResponse.next_page_token]
- // from a previous
- // [ListInstanceConfigsResponse][google.spanner.admin.instance.v1.ListInstanceConfigsResponse].
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListInstanceConfigsRequest) Reset() {
- *x = ListInstanceConfigsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstanceConfigsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstanceConfigsRequest) ProtoMessage() {}
-
-func (x *ListInstanceConfigsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstanceConfigsRequest.ProtoReflect.Descriptor instead.
-func (*ListInstanceConfigsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *ListInstanceConfigsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListInstanceConfigsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListInstanceConfigsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
-type ListInstanceConfigsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of requested instance configurations.
- InstanceConfigs []*InstanceConfig `protobuf:"bytes,1,rep,name=instance_configs,json=instanceConfigs,proto3" json:"instance_configs,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]
- // call to fetch more of the matching instance configurations.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListInstanceConfigsResponse) Reset() {
- *x = ListInstanceConfigsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstanceConfigsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstanceConfigsResponse) ProtoMessage() {}
-
-func (x *ListInstanceConfigsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstanceConfigsResponse.ProtoReflect.Descriptor instead.
-func (*ListInstanceConfigsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *ListInstanceConfigsResponse) GetInstanceConfigs() []*InstanceConfig {
- if x != nil {
- return x.InstanceConfigs
- }
- return nil
-}
-
-func (x *ListInstanceConfigsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for
-// [GetInstanceConfigRequest][google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig].
-type GetInstanceConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the requested instance configuration. Values are of
- // the form `projects/<project>/instanceConfigs/<config>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetInstanceConfigRequest) Reset() {
- *x = GetInstanceConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInstanceConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInstanceConfigRequest) ProtoMessage() {}
-
-func (x *GetInstanceConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInstanceConfigRequest.ProtoReflect.Descriptor instead.
-func (*GetInstanceConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *GetInstanceConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest].
-type CreateInstanceConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the project in which to create the instance
- // configuration. Values are of the form `projects/<project>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The ID of the instance configuration to create. Valid identifiers
- // are of the form `custom-[-a-z0-9]*[a-z0-9]` and must be between 2 and 64
- // characters in length. The `custom-` prefix is required to avoid name
- // conflicts with Google-managed configurations.
- InstanceConfigId string `protobuf:"bytes,2,opt,name=instance_config_id,json=instanceConfigId,proto3" json:"instance_config_id,omitempty"`
- // Required. The InstanceConfig proto of the configuration to create.
- // instance_config.name must be
- // `<parent>/instanceConfigs/<instance_config_id>`.
- // instance_config.base_config must be a Google managed configuration name,
- // e.g. <parent>/instanceConfigs/us-east1, <parent>/instanceConfigs/nam3.
- InstanceConfig *InstanceConfig `protobuf:"bytes,3,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
- // An option to validate, but not actually execute, a request,
- // and provide the same response.
- ValidateOnly bool `protobuf:"varint,4,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
-}
-
-func (x *CreateInstanceConfigRequest) Reset() {
- *x = CreateInstanceConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateInstanceConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateInstanceConfigRequest) ProtoMessage() {}
-
-func (x *CreateInstanceConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateInstanceConfigRequest.ProtoReflect.Descriptor instead.
-func (*CreateInstanceConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *CreateInstanceConfigRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateInstanceConfigRequest) GetInstanceConfigId() string {
- if x != nil {
- return x.InstanceConfigId
- }
- return ""
-}
-
-func (x *CreateInstanceConfigRequest) GetInstanceConfig() *InstanceConfig {
- if x != nil {
- return x.InstanceConfig
- }
- return nil
-}
-
-func (x *CreateInstanceConfigRequest) GetValidateOnly() bool {
- if x != nil {
- return x.ValidateOnly
- }
- return false
-}
-
-// The request for
-// [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest].
-type UpdateInstanceConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The user instance configuration to update, which must always
- // include the instance configuration name. Otherwise, only fields mentioned
- // in
- // [update_mask][google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask]
- // need be included. To prevent conflicts of concurrent updates,
- // [etag][google.spanner.admin.instance.v1.InstanceConfig.reconciling] can
- // be used.
- InstanceConfig *InstanceConfig `protobuf:"bytes,1,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
- // Required. A mask specifying which fields in
- // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] should be
- // updated. The field mask must always be specified; this prevents any future
- // fields in [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig]
- // from being erased accidentally by clients that do not know about them. Only
- // display_name and labels can be updated.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
- // An option to validate, but not actually execute, a request,
- // and provide the same response.
- ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
-}
-
-func (x *UpdateInstanceConfigRequest) Reset() {
- *x = UpdateInstanceConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateInstanceConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateInstanceConfigRequest) ProtoMessage() {}
-
-func (x *UpdateInstanceConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateInstanceConfigRequest.ProtoReflect.Descriptor instead.
-func (*UpdateInstanceConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *UpdateInstanceConfigRequest) GetInstanceConfig() *InstanceConfig {
- if x != nil {
- return x.InstanceConfig
- }
- return nil
-}
-
-func (x *UpdateInstanceConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.UpdateMask
- }
- return nil
-}
-
-func (x *UpdateInstanceConfigRequest) GetValidateOnly() bool {
- if x != nil {
- return x.ValidateOnly
- }
- return false
-}
-
-// The request for
-// [DeleteInstanceConfigRequest][InstanceAdmin.DeleteInstanceConfigRequest].
-type DeleteInstanceConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance configuration to be deleted.
- // Values are of the form
- // `projects/<project>/instanceConfigs/<instance_config>`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Used for optimistic concurrency control as a way to help prevent
- // simultaneous deletes of an instance configuration from overwriting each
- // other. If not empty, the API
- // only deletes the instance configuration when the etag provided matches the
- // current status of the requested instance configuration. Otherwise, deletes
- // the instance configuration without checking the current status of the
- // requested instance configuration.
- Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"`
- // An option to validate, but not actually execute, a request,
- // and provide the same response.
- ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
-}
-
-func (x *DeleteInstanceConfigRequest) Reset() {
- *x = DeleteInstanceConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteInstanceConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteInstanceConfigRequest) ProtoMessage() {}
-
-func (x *DeleteInstanceConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteInstanceConfigRequest.ProtoReflect.Descriptor instead.
-func (*DeleteInstanceConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *DeleteInstanceConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *DeleteInstanceConfigRequest) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-func (x *DeleteInstanceConfigRequest) GetValidateOnly() bool {
- if x != nil {
- return x.ValidateOnly
- }
- return false
-}
-
-// The request for
-// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
-type ListInstanceConfigOperationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The project of the instance configuration operations.
- // Values are of the form `projects/<project>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // An expression that filters the list of returned operations.
- //
- // A filter expression consists of a field name, a
- // comparison operator, and a value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the [Operation][google.longrunning.Operation]
- // are eligible for filtering:
- //
- // - `name` - The name of the long-running operation
- // - `done` - False if the operation is in progress, else true.
- // - `metadata.@type` - the type of metadata. For example, the type string
- // for
- // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]
- // is
- // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata`.
- // - `metadata.<field_name>` - any field in metadata.value.
- // `metadata.@type` must be specified first, if filtering on metadata
- // fields.
- // - `error` - Error associated with the long-running operation.
- // - `response.@type` - the type of response.
- // - `response.<field_name>` - any field in response.value.
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic. However,
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // - `done:true` - The operation is complete.
- // - `(metadata.@type=` \
- // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstanceConfigMetadata)
- // AND` \
- // `(metadata.instance_config.name:custom-config) AND` \
- // `(metadata.progress.start_time < \"2021-03-28T14:50:00Z\") AND` \
- // `(error:*)` - Return operations where:
- // - The operation's metadata type is
- // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
- // - The instance configuration name contains "custom-config".
- // - The operation started before 2021-03-28T14:50:00Z.
- // - The operation resulted in an error.
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
- // Number of operations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.next_page_token]
- // from a previous
- // [ListInstanceConfigOperationsResponse][google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse]
- // to the same `parent` and with the same `filter`.
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListInstanceConfigOperationsRequest) Reset() {
- *x = ListInstanceConfigOperationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstanceConfigOperationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstanceConfigOperationsRequest) ProtoMessage() {}
-
-func (x *ListInstanceConfigOperationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstanceConfigOperationsRequest.ProtoReflect.Descriptor instead.
-func (*ListInstanceConfigOperationsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *ListInstanceConfigOperationsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListInstanceConfigOperationsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListInstanceConfigOperationsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListInstanceConfigOperationsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The response for
-// [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations].
-type ListInstanceConfigOperationsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of matching instance configuration [long-running
- // operations][google.longrunning.Operation]. Each operation's name will be
- // prefixed by the name of the instance configuration. The operation's
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata.
- Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations]
- // call to fetch more of the matching metadata.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListInstanceConfigOperationsResponse) Reset() {
- *x = ListInstanceConfigOperationsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstanceConfigOperationsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstanceConfigOperationsResponse) ProtoMessage() {}
-
-func (x *ListInstanceConfigOperationsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstanceConfigOperationsResponse.ProtoReflect.Descriptor instead.
-func (*ListInstanceConfigOperationsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *ListInstanceConfigOperationsResponse) GetOperations() []*longrunningpb.Operation {
- if x != nil {
- return x.Operations
- }
- return nil
-}
-
-func (x *ListInstanceConfigOperationsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for
-// [GetInstance][google.spanner.admin.instance.v1.InstanceAdmin.GetInstance].
-type GetInstanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the requested instance. Values are of the form
- // `projects/<project>/instances/<instance>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // If field_mask is present, specifies the subset of
- // [Instance][google.spanner.admin.instance.v1.Instance] fields that should be
- // returned. If absent, all
- // [Instance][google.spanner.admin.instance.v1.Instance] fields are returned.
- FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
-}
-
-func (x *GetInstanceRequest) Reset() {
- *x = GetInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInstanceRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInstanceRequest) ProtoMessage() {}
-
-func (x *GetInstanceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInstanceRequest.ProtoReflect.Descriptor instead.
-func (*GetInstanceRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *GetInstanceRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *GetInstanceRequest) GetFieldMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.FieldMask
- }
- return nil
-}
-
-// The request for
-// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
-type CreateInstanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the project in which to create the instance. Values
- // are of the form `projects/<project>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The ID of the instance to create. Valid identifiers are of the
- // form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64 characters in
- // length.
- InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
- // Required. The instance to create. The name may be omitted, but if
- // specified must be `<parent>/instances/<instance_id>`.
- Instance *Instance `protobuf:"bytes,3,opt,name=instance,proto3" json:"instance,omitempty"`
-}
-
-func (x *CreateInstanceRequest) Reset() {
- *x = CreateInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateInstanceRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateInstanceRequest) ProtoMessage() {}
-
-func (x *CreateInstanceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateInstanceRequest.ProtoReflect.Descriptor instead.
-func (*CreateInstanceRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *CreateInstanceRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateInstanceRequest) GetInstanceId() string {
- if x != nil {
- return x.InstanceId
- }
- return ""
-}
-
-func (x *CreateInstanceRequest) GetInstance() *Instance {
- if x != nil {
- return x.Instance
- }
- return nil
-}
-
-// The request for
-// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
-type ListInstancesRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the project for which a list of instances is
- // requested. Values are of the form `projects/<project>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Number of instances to be returned in the response. If 0 or less, defaults
- // to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.instance.v1.ListInstancesResponse.next_page_token]
- // from a previous
- // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // An expression for filtering the results of the request. Filter rules are
- // case insensitive. The fields eligible for filtering are:
- //
- // - `name`
- // - `display_name`
- // - `labels.key` where key is the name of a label
- //
- // Some examples of using filters are:
- //
- // - `name:*` --> The instance has a name.
- // - `name:Howl` --> The instance's name contains the string "howl".
- // - `name:HOWL` --> Equivalent to above.
- // - `NAME:howl` --> Equivalent to above.
- // - `labels.env:*` --> The instance has the label "env".
- // - `labels.env:dev` --> The instance has the label "env" and the value of
- // the label contains the string "dev".
- // - `name:howl labels.env:dev` --> The instance's name contains "howl" and
- // it has the label "env" with its value
- // containing "dev".
- Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
- // Deadline used while retrieving metadata for instances.
- // Instances whose metadata cannot be retrieved within this deadline will be
- // added to
- // [unreachable][google.spanner.admin.instance.v1.ListInstancesResponse.unreachable]
- // in
- // [ListInstancesResponse][google.spanner.admin.instance.v1.ListInstancesResponse].
- InstanceDeadline *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=instance_deadline,json=instanceDeadline,proto3" json:"instance_deadline,omitempty"`
-}
-
-func (x *ListInstancesRequest) Reset() {
- *x = ListInstancesRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstancesRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstancesRequest) ProtoMessage() {}
-
-func (x *ListInstancesRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstancesRequest.ProtoReflect.Descriptor instead.
-func (*ListInstancesRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *ListInstancesRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListInstancesRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListInstancesRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-func (x *ListInstancesRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListInstancesRequest) GetInstanceDeadline() *timestamppb.Timestamp {
- if x != nil {
- return x.InstanceDeadline
- }
- return nil
-}
-
-// The response for
-// [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances].
-type ListInstancesResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of requested instances.
- Instances []*Instance `protobuf:"bytes,1,rep,name=instances,proto3" json:"instances,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListInstances][google.spanner.admin.instance.v1.InstanceAdmin.ListInstances]
- // call to fetch more of the matching instances.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
- // The list of unreachable instances.
- // It includes the names of instances whose metadata could not be retrieved
- // within
- // [instance_deadline][google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline].
- Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
-}
-
-func (x *ListInstancesResponse) Reset() {
- *x = ListInstancesResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstancesResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstancesResponse) ProtoMessage() {}
-
-func (x *ListInstancesResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstancesResponse.ProtoReflect.Descriptor instead.
-func (*ListInstancesResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *ListInstancesResponse) GetInstances() []*Instance {
- if x != nil {
- return x.Instances
- }
- return nil
-}
-
-func (x *ListInstancesResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-func (x *ListInstancesResponse) GetUnreachable() []string {
- if x != nil {
- return x.Unreachable
- }
- return nil
-}
-
-// The request for
-// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
-type UpdateInstanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance to update, which must always include the instance
- // name. Otherwise, only fields mentioned in
- // [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask]
- // need be included.
- Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"`
- // Required. A mask specifying which fields in
- // [Instance][google.spanner.admin.instance.v1.Instance] should be updated.
- // The field mask must always be specified; this prevents any future fields in
- // [Instance][google.spanner.admin.instance.v1.Instance] from being erased
- // accidentally by clients that do not know about them.
- FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
-}
-
-func (x *UpdateInstanceRequest) Reset() {
- *x = UpdateInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateInstanceRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateInstanceRequest) ProtoMessage() {}
-
-func (x *UpdateInstanceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateInstanceRequest.ProtoReflect.Descriptor instead.
-func (*UpdateInstanceRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *UpdateInstanceRequest) GetInstance() *Instance {
- if x != nil {
- return x.Instance
- }
- return nil
-}
-
-func (x *UpdateInstanceRequest) GetFieldMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.FieldMask
- }
- return nil
-}
-
-// The request for
-// [DeleteInstance][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance].
-type DeleteInstanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance to be deleted. Values are of the form
- // `projects/<project>/instances/<instance>`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteInstanceRequest) Reset() {
- *x = DeleteInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteInstanceRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteInstanceRequest) ProtoMessage() {}
-
-func (x *DeleteInstanceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteInstanceRequest.ProtoReflect.Descriptor instead.
-func (*DeleteInstanceRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *DeleteInstanceRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Metadata type for the operation returned by
-// [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance].
-type CreateInstanceMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The instance being created.
- Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"`
- // The time at which the
- // [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance]
- // request was received.
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // The time at which this operation was cancelled. If set, this operation is
- // in the process of undoing itself (which is guaranteed to succeed) and
- // cannot be cancelled again.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
- // The time at which this operation failed or was completed successfully.
- EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- // The expected fulfillment period of this create operation.
- ExpectedFulfillmentPeriod FulfillmentPeriod `protobuf:"varint,5,opt,name=expected_fulfillment_period,json=expectedFulfillmentPeriod,proto3,enum=google.spanner.admin.instance.v1.FulfillmentPeriod" json:"expected_fulfillment_period,omitempty"`
-}
-
-func (x *CreateInstanceMetadata) Reset() {
- *x = CreateInstanceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateInstanceMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateInstanceMetadata) ProtoMessage() {}
-
-func (x *CreateInstanceMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateInstanceMetadata.ProtoReflect.Descriptor instead.
-func (*CreateInstanceMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *CreateInstanceMetadata) GetInstance() *Instance {
- if x != nil {
- return x.Instance
- }
- return nil
-}
-
-func (x *CreateInstanceMetadata) GetStartTime() *timestamppb.Timestamp {
- if x != nil {
- return x.StartTime
- }
- return nil
-}
-
-func (x *CreateInstanceMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-func (x *CreateInstanceMetadata) GetEndTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EndTime
- }
- return nil
-}
-
-func (x *CreateInstanceMetadata) GetExpectedFulfillmentPeriod() FulfillmentPeriod {
- if x != nil {
- return x.ExpectedFulfillmentPeriod
- }
- return FulfillmentPeriod_FULFILLMENT_PERIOD_UNSPECIFIED
-}
-
-// Metadata type for the operation returned by
-// [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance].
-type UpdateInstanceMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The desired end state of the update.
- Instance *Instance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty"`
- // The time at which
- // [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]
- // request was received.
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // The time at which this operation was cancelled. If set, this operation is
- // in the process of undoing itself (which is guaranteed to succeed) and
- // cannot be cancelled again.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
- // The time at which this operation failed or was completed successfully.
- EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
- // The expected fulfillment period of this update operation.
- ExpectedFulfillmentPeriod FulfillmentPeriod `protobuf:"varint,5,opt,name=expected_fulfillment_period,json=expectedFulfillmentPeriod,proto3,enum=google.spanner.admin.instance.v1.FulfillmentPeriod" json:"expected_fulfillment_period,omitempty"`
-}
-
-func (x *UpdateInstanceMetadata) Reset() {
- *x = UpdateInstanceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateInstanceMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateInstanceMetadata) ProtoMessage() {}
-
-func (x *UpdateInstanceMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateInstanceMetadata.ProtoReflect.Descriptor instead.
-func (*UpdateInstanceMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *UpdateInstanceMetadata) GetInstance() *Instance {
- if x != nil {
- return x.Instance
- }
- return nil
-}
-
-func (x *UpdateInstanceMetadata) GetStartTime() *timestamppb.Timestamp {
- if x != nil {
- return x.StartTime
- }
- return nil
-}
-
-func (x *UpdateInstanceMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-func (x *UpdateInstanceMetadata) GetEndTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EndTime
- }
- return nil
-}
-
-func (x *UpdateInstanceMetadata) GetExpectedFulfillmentPeriod() FulfillmentPeriod {
- if x != nil {
- return x.ExpectedFulfillmentPeriod
- }
- return FulfillmentPeriod_FULFILLMENT_PERIOD_UNSPECIFIED
-}
-
-// Metadata type for the operation returned by
-// [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig].
-type CreateInstanceConfigMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The target instance configuration end state.
- InstanceConfig *InstanceConfig `protobuf:"bytes,1,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
- // The progress of the
- // [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]
- // operation.
- Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which this operation was cancelled.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
-}
-
-func (x *CreateInstanceConfigMetadata) Reset() {
- *x = CreateInstanceConfigMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateInstanceConfigMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateInstanceConfigMetadata) ProtoMessage() {}
-
-func (x *CreateInstanceConfigMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateInstanceConfigMetadata.ProtoReflect.Descriptor instead.
-func (*CreateInstanceConfigMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *CreateInstanceConfigMetadata) GetInstanceConfig() *InstanceConfig {
- if x != nil {
- return x.InstanceConfig
- }
- return nil
-}
-
-func (x *CreateInstanceConfigMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *CreateInstanceConfigMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig].
-type UpdateInstanceConfigMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The desired instance configuration after updating.
- InstanceConfig *InstanceConfig `protobuf:"bytes,1,opt,name=instance_config,json=instanceConfig,proto3" json:"instance_config,omitempty"`
- // The progress of the
- // [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]
- // operation.
- Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which this operation was cancelled.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
-}
-
-func (x *UpdateInstanceConfigMetadata) Reset() {
- *x = UpdateInstanceConfigMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateInstanceConfigMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateInstanceConfigMetadata) ProtoMessage() {}
-
-func (x *UpdateInstanceConfigMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateInstanceConfigMetadata.ProtoReflect.Descriptor instead.
-func (*UpdateInstanceConfigMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{21}
-}
-
-func (x *UpdateInstanceConfigMetadata) GetInstanceConfig() *InstanceConfig {
- if x != nil {
- return x.InstanceConfig
- }
- return nil
-}
-
-func (x *UpdateInstanceConfigMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *UpdateInstanceConfigMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-// An isolated set of Cloud Spanner resources that databases can define
-// placements on.
-type InstancePartition struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. A unique identifier for the instance partition. Values are of the
- // form
- // `projects/<project>/instances/<instance>/instancePartitions/[a-z][-a-z0-9]*[a-z0-9]`.
- // The final segment of the name must be between 2 and 64 characters in
- // length. An instance partition's name cannot be changed after the instance
- // partition is created.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The name of the instance partition's configuration. Values are of
- // the form `projects/<project>/instanceConfigs/<configuration>`. See also
- // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and
- // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs].
- Config string `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- // Required. The descriptive name for this instance partition as it appears in
- // UIs. Must be unique per project and between 4 and 30 characters in length.
- DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- // Compute capacity defines amount of server and storage resources that are
- // available to the databases in an instance partition. At most one of either
- // node_count or processing_units should be present in the message. See [the
- // documentation](https://cloud.google.com/spanner/docs/compute-capacity)
- // for more information about nodes and processing units.
- //
- // Types that are assignable to ComputeCapacity:
- //
- // *InstancePartition_NodeCount
- // *InstancePartition_ProcessingUnits
- ComputeCapacity isInstancePartition_ComputeCapacity `protobuf_oneof:"compute_capacity"`
- // Output only. The current instance partition state.
- State InstancePartition_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.spanner.admin.instance.v1.InstancePartition_State" json:"state,omitempty"`
- // Output only. The time at which the instance partition was created.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The time at which the instance partition was most recently
- // updated.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // Output only. The names of the databases that reference this
- // instance partition. Referencing databases should share the parent instance.
- // The existence of any referencing database prevents the instance partition
- // from being deleted.
- ReferencingDatabases []string `protobuf:"bytes,10,rep,name=referencing_databases,json=referencingDatabases,proto3" json:"referencing_databases,omitempty"`
- // Output only. The names of the backups that reference this instance
- // partition. Referencing backups should share the parent instance. The
- // existence of any referencing backup prevents the instance partition from
- // being deleted.
- ReferencingBackups []string `protobuf:"bytes,11,rep,name=referencing_backups,json=referencingBackups,proto3" json:"referencing_backups,omitempty"`
- // Used for optimistic concurrency control as a way
- // to help prevent simultaneous updates of a instance partition from
- // overwriting each other. It is strongly suggested that systems make use of
- // the etag in the read-modify-write cycle to perform instance partition
- // updates in order to avoid race conditions: An etag is returned in the
- // response which contains instance partitions, and systems are expected to
- // put that etag in the request to update instance partitions to ensure that
- // their change will be applied to the same version of the instance partition.
- // If no etag is provided in the call to update instance partition, then the
- // existing instance partition is overwritten blindly.
- Etag string `protobuf:"bytes,12,opt,name=etag,proto3" json:"etag,omitempty"`
-}
-
-func (x *InstancePartition) Reset() {
- *x = InstancePartition{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *InstancePartition) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*InstancePartition) ProtoMessage() {}
-
-func (x *InstancePartition) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use InstancePartition.ProtoReflect.Descriptor instead.
-func (*InstancePartition) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{22}
-}
-
-func (x *InstancePartition) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *InstancePartition) GetConfig() string {
- if x != nil {
- return x.Config
- }
- return ""
-}
-
-func (x *InstancePartition) GetDisplayName() string {
- if x != nil {
- return x.DisplayName
- }
- return ""
-}
-
-func (m *InstancePartition) GetComputeCapacity() isInstancePartition_ComputeCapacity {
- if m != nil {
- return m.ComputeCapacity
- }
- return nil
-}
-
-func (x *InstancePartition) GetNodeCount() int32 {
- if x, ok := x.GetComputeCapacity().(*InstancePartition_NodeCount); ok {
- return x.NodeCount
- }
- return 0
-}
-
-func (x *InstancePartition) GetProcessingUnits() int32 {
- if x, ok := x.GetComputeCapacity().(*InstancePartition_ProcessingUnits); ok {
- return x.ProcessingUnits
- }
- return 0
-}
-
-func (x *InstancePartition) GetState() InstancePartition_State {
- if x != nil {
- return x.State
- }
- return InstancePartition_STATE_UNSPECIFIED
-}
-
-func (x *InstancePartition) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *InstancePartition) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
-}
-
-func (x *InstancePartition) GetReferencingDatabases() []string {
- if x != nil {
- return x.ReferencingDatabases
- }
- return nil
-}
-
-func (x *InstancePartition) GetReferencingBackups() []string {
- if x != nil {
- return x.ReferencingBackups
- }
- return nil
-}
-
-func (x *InstancePartition) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-type isInstancePartition_ComputeCapacity interface {
- isInstancePartition_ComputeCapacity()
-}
-
-type InstancePartition_NodeCount struct {
- // The number of nodes allocated to this instance partition.
- //
- // Users can set the node_count field to specify the target number of nodes
- // allocated to the instance partition.
- //
- // This may be zero in API responses for instance partitions that are not
- // yet in state `READY`.
- NodeCount int32 `protobuf:"varint,5,opt,name=node_count,json=nodeCount,proto3,oneof"`
-}
-
-type InstancePartition_ProcessingUnits struct {
- // The number of processing units allocated to this instance partition.
- //
- // Users can set the processing_units field to specify the target number of
- // processing units allocated to the instance partition.
- //
- // This may be zero in API responses for instance partitions that are not
- // yet in state `READY`.
- ProcessingUnits int32 `protobuf:"varint,6,opt,name=processing_units,json=processingUnits,proto3,oneof"`
-}
-
-func (*InstancePartition_NodeCount) isInstancePartition_ComputeCapacity() {}
-
-func (*InstancePartition_ProcessingUnits) isInstancePartition_ComputeCapacity() {}
-
-// Metadata type for the operation returned by
-// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
-type CreateInstancePartitionMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The instance partition being created.
- InstancePartition *InstancePartition `protobuf:"bytes,1,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
- // The time at which the
- // [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition]
- // request was received.
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // The time at which this operation was cancelled. If set, this operation is
- // in the process of undoing itself (which is guaranteed to succeed) and
- // cannot be cancelled again.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
- // The time at which this operation failed or was completed successfully.
- EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
-}
-
-func (x *CreateInstancePartitionMetadata) Reset() {
- *x = CreateInstancePartitionMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateInstancePartitionMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateInstancePartitionMetadata) ProtoMessage() {}
-
-func (x *CreateInstancePartitionMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateInstancePartitionMetadata.ProtoReflect.Descriptor instead.
-func (*CreateInstancePartitionMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{23}
-}
-
-func (x *CreateInstancePartitionMetadata) GetInstancePartition() *InstancePartition {
- if x != nil {
- return x.InstancePartition
- }
- return nil
-}
-
-func (x *CreateInstancePartitionMetadata) GetStartTime() *timestamppb.Timestamp {
- if x != nil {
- return x.StartTime
- }
- return nil
-}
-
-func (x *CreateInstancePartitionMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-func (x *CreateInstancePartitionMetadata) GetEndTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EndTime
- }
- return nil
-}
-
-// The request for
-// [CreateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition].
-type CreateInstancePartitionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance in which to create the instance
- // partition. Values are of the form
- // `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. The ID of the instance partition to create. Valid identifiers are
- // of the form `[a-z][-a-z0-9]*[a-z0-9]` and must be between 2 and 64
- // characters in length.
- InstancePartitionId string `protobuf:"bytes,2,opt,name=instance_partition_id,json=instancePartitionId,proto3" json:"instance_partition_id,omitempty"`
- // Required. The instance partition to create. The instance_partition.name may
- // be omitted, but if specified must be
- // `<parent>/instancePartitions/<instance_partition_id>`.
- InstancePartition *InstancePartition `protobuf:"bytes,3,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
-}
-
-func (x *CreateInstancePartitionRequest) Reset() {
- *x = CreateInstancePartitionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateInstancePartitionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateInstancePartitionRequest) ProtoMessage() {}
-
-func (x *CreateInstancePartitionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateInstancePartitionRequest.ProtoReflect.Descriptor instead.
-func (*CreateInstancePartitionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{24}
-}
-
-func (x *CreateInstancePartitionRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateInstancePartitionRequest) GetInstancePartitionId() string {
- if x != nil {
- return x.InstancePartitionId
- }
- return ""
-}
-
-func (x *CreateInstancePartitionRequest) GetInstancePartition() *InstancePartition {
- if x != nil {
- return x.InstancePartition
- }
- return nil
-}
-
-// The request for
-// [DeleteInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition].
-type DeleteInstancePartitionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the instance partition to be deleted.
- // Values are of the form
- // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Optional. If not empty, the API only deletes the instance partition when
- // the etag provided matches the current status of the requested instance
- // partition. Otherwise, deletes the instance partition without checking the
- // current status of the requested instance partition.
- Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"`
-}
-
-func (x *DeleteInstancePartitionRequest) Reset() {
- *x = DeleteInstancePartitionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteInstancePartitionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteInstancePartitionRequest) ProtoMessage() {}
-
-func (x *DeleteInstancePartitionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteInstancePartitionRequest.ProtoReflect.Descriptor instead.
-func (*DeleteInstancePartitionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{25}
-}
-
-func (x *DeleteInstancePartitionRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *DeleteInstancePartitionRequest) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-// The request for
-// [GetInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition].
-type GetInstancePartitionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the requested instance partition. Values are of
- // the form
- // `projects/{project}/instances/{instance}/instancePartitions/{instance_partition}`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetInstancePartitionRequest) Reset() {
- *x = GetInstancePartitionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetInstancePartitionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetInstancePartitionRequest) ProtoMessage() {}
-
-func (x *GetInstancePartitionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetInstancePartitionRequest.ProtoReflect.Descriptor instead.
-func (*GetInstancePartitionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{26}
-}
-
-func (x *GetInstancePartitionRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for
-// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
-type UpdateInstancePartitionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance partition to update, which must always include the
- // instance partition name. Otherwise, only fields mentioned in
- // [field_mask][google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask]
- // need be included.
- InstancePartition *InstancePartition `protobuf:"bytes,1,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
- // Required. A mask specifying which fields in
- // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
- // should be updated. The field mask must always be specified; this prevents
- // any future fields in
- // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition]
- // from being erased accidentally by clients that do not know about them.
- FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"`
-}
-
-func (x *UpdateInstancePartitionRequest) Reset() {
- *x = UpdateInstancePartitionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateInstancePartitionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateInstancePartitionRequest) ProtoMessage() {}
-
-func (x *UpdateInstancePartitionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateInstancePartitionRequest.ProtoReflect.Descriptor instead.
-func (*UpdateInstancePartitionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{27}
-}
-
-func (x *UpdateInstancePartitionRequest) GetInstancePartition() *InstancePartition {
- if x != nil {
- return x.InstancePartition
- }
- return nil
-}
-
-func (x *UpdateInstancePartitionRequest) GetFieldMask() *fieldmaskpb.FieldMask {
- if x != nil {
- return x.FieldMask
- }
- return nil
-}
-
-// Metadata type for the operation returned by
-// [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition].
-type UpdateInstancePartitionMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The desired end state of the update.
- InstancePartition *InstancePartition `protobuf:"bytes,1,opt,name=instance_partition,json=instancePartition,proto3" json:"instance_partition,omitempty"`
- // The time at which
- // [UpdateInstancePartition][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition]
- // request was received.
- StartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
- // The time at which this operation was cancelled. If set, this operation is
- // in the process of undoing itself (which is guaranteed to succeed) and
- // cannot be cancelled again.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
- // The time at which this operation failed or was completed successfully.
- EndTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
-}
-
-func (x *UpdateInstancePartitionMetadata) Reset() {
- *x = UpdateInstancePartitionMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpdateInstancePartitionMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpdateInstancePartitionMetadata) ProtoMessage() {}
-
-func (x *UpdateInstancePartitionMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpdateInstancePartitionMetadata.ProtoReflect.Descriptor instead.
-func (*UpdateInstancePartitionMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{28}
-}
-
-func (x *UpdateInstancePartitionMetadata) GetInstancePartition() *InstancePartition {
- if x != nil {
- return x.InstancePartition
- }
- return nil
-}
-
-func (x *UpdateInstancePartitionMetadata) GetStartTime() *timestamppb.Timestamp {
- if x != nil {
- return x.StartTime
- }
- return nil
-}
-
-func (x *UpdateInstancePartitionMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-func (x *UpdateInstancePartitionMetadata) GetEndTime() *timestamppb.Timestamp {
- if x != nil {
- return x.EndTime
- }
- return nil
-}
-
-// The request for
-// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
-type ListInstancePartitionsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance whose instance partitions should be listed. Values
- // are of the form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Number of instance partitions to be returned in the response. If 0 or less,
- // defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.next_page_token]
- // from a previous
- // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // Optional. Deadline used while retrieving metadata for instance partitions.
- // Instance partitions whose metadata cannot be retrieved within this deadline
- // will be added to
- // [unreachable][google.spanner.admin.instance.v1.ListInstancePartitionsResponse.unreachable]
- // in
- // [ListInstancePartitionsResponse][google.spanner.admin.instance.v1.ListInstancePartitionsResponse].
- InstancePartitionDeadline *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=instance_partition_deadline,json=instancePartitionDeadline,proto3" json:"instance_partition_deadline,omitempty"`
-}
-
-func (x *ListInstancePartitionsRequest) Reset() {
- *x = ListInstancePartitionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstancePartitionsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstancePartitionsRequest) ProtoMessage() {}
-
-func (x *ListInstancePartitionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstancePartitionsRequest.ProtoReflect.Descriptor instead.
-func (*ListInstancePartitionsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{29}
-}
-
-func (x *ListInstancePartitionsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListInstancePartitionsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListInstancePartitionsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-func (x *ListInstancePartitionsRequest) GetInstancePartitionDeadline() *timestamppb.Timestamp {
- if x != nil {
- return x.InstancePartitionDeadline
- }
- return nil
-}
-
-// The response for
-// [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions].
-type ListInstancePartitionsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of requested instancePartitions.
- InstancePartitions []*InstancePartition `protobuf:"bytes,1,rep,name=instance_partitions,json=instancePartitions,proto3" json:"instance_partitions,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions]
- // call to fetch more of the matching instance partitions.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
- // The list of unreachable instance partitions.
- // It includes the names of instance partitions whose metadata could
- // not be retrieved within
- // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline].
- Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
-}
-
-func (x *ListInstancePartitionsResponse) Reset() {
- *x = ListInstancePartitionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstancePartitionsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstancePartitionsResponse) ProtoMessage() {}
-
-func (x *ListInstancePartitionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstancePartitionsResponse.ProtoReflect.Descriptor instead.
-func (*ListInstancePartitionsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{30}
-}
-
-func (x *ListInstancePartitionsResponse) GetInstancePartitions() []*InstancePartition {
- if x != nil {
- return x.InstancePartitions
- }
- return nil
-}
-
-func (x *ListInstancePartitionsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-func (x *ListInstancePartitionsResponse) GetUnreachable() []string {
- if x != nil {
- return x.Unreachable
- }
- return nil
-}
-
-// The request for
-// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
-type ListInstancePartitionOperationsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The parent instance of the instance partition operations.
- // Values are of the form `projects/<project>/instances/<instance>`.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Optional. An expression that filters the list of returned operations.
- //
- // A filter expression consists of a field name, a
- // comparison operator, and a value for filtering.
- // The value must be a string, a number, or a boolean. The comparison operator
- // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`.
- // Colon `:` is the contains operator. Filter rules are not case sensitive.
- //
- // The following fields in the [Operation][google.longrunning.Operation]
- // are eligible for filtering:
- //
- // - `name` - The name of the long-running operation
- // - `done` - False if the operation is in progress, else true.
- // - `metadata.@type` - the type of metadata. For example, the type string
- // for
- // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]
- // is
- // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata`.
- // - `metadata.<field_name>` - any field in metadata.value.
- // `metadata.@type` must be specified first, if filtering on metadata
- // fields.
- // - `error` - Error associated with the long-running operation.
- // - `response.@type` - the type of response.
- // - `response.<field_name>` - any field in response.value.
- //
- // You can combine multiple expressions by enclosing each expression in
- // parentheses. By default, expressions are combined with AND logic. However,
- // you can specify AND, OR, and NOT logic explicitly.
- //
- // Here are a few examples:
- //
- // - `done:true` - The operation is complete.
- // - `(metadata.@type=` \
- // `type.googleapis.com/google.spanner.admin.instance.v1.CreateInstancePartitionMetadata)
- // AND` \
- // `(metadata.instance_partition.name:custom-instance-partition) AND` \
- // `(metadata.start_time < \"2021-03-28T14:50:00Z\") AND` \
- // `(error:*)` - Return operations where:
- // - The operation's metadata type is
- // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
- // - The instance partition name contains "custom-instance-partition".
- // - The operation started before 2021-03-28T14:50:00Z.
- // - The operation resulted in an error.
- Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
- // Optional. Number of operations to be returned in the response. If 0 or
- // less, defaults to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // Optional. If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.next_page_token]
- // from a previous
- // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]
- // to the same `parent` and with the same `filter`.
- PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // Optional. Deadline used while retrieving metadata for instance partition
- // operations. Instance partitions whose operation metadata cannot be
- // retrieved within this deadline will be added to
- // [unreachable][ListInstancePartitionOperationsResponse.unreachable] in
- // [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse].
- InstancePartitionDeadline *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=instance_partition_deadline,json=instancePartitionDeadline,proto3" json:"instance_partition_deadline,omitempty"`
-}
-
-func (x *ListInstancePartitionOperationsRequest) Reset() {
- *x = ListInstancePartitionOperationsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstancePartitionOperationsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstancePartitionOperationsRequest) ProtoMessage() {}
-
-func (x *ListInstancePartitionOperationsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstancePartitionOperationsRequest.ProtoReflect.Descriptor instead.
-func (*ListInstancePartitionOperationsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{31}
-}
-
-func (x *ListInstancePartitionOperationsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListInstancePartitionOperationsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-func (x *ListInstancePartitionOperationsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListInstancePartitionOperationsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-func (x *ListInstancePartitionOperationsRequest) GetInstancePartitionDeadline() *timestamppb.Timestamp {
- if x != nil {
- return x.InstancePartitionDeadline
- }
- return nil
-}
-
-// The response for
-// [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations].
-type ListInstancePartitionOperationsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of matching instance partition [long-running
- // operations][google.longrunning.Operation]. Each operation's name will be
- // prefixed by the instance partition's name. The operation's
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata.
- Operations []*longrunningpb.Operation `protobuf:"bytes,1,rep,name=operations,proto3" json:"operations,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations]
- // call to fetch more of the matching metadata.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
- // The list of unreachable instance partitions.
- // It includes the names of instance partitions whose operation metadata could
- // not be retrieved within
- // [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline].
- UnreachableInstancePartitions []string `protobuf:"bytes,3,rep,name=unreachable_instance_partitions,json=unreachableInstancePartitions,proto3" json:"unreachable_instance_partitions,omitempty"`
-}
-
-func (x *ListInstancePartitionOperationsResponse) Reset() {
- *x = ListInstancePartitionOperationsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListInstancePartitionOperationsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListInstancePartitionOperationsResponse) ProtoMessage() {}
-
-func (x *ListInstancePartitionOperationsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListInstancePartitionOperationsResponse.ProtoReflect.Descriptor instead.
-func (*ListInstancePartitionOperationsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{32}
-}
-
-func (x *ListInstancePartitionOperationsResponse) GetOperations() []*longrunningpb.Operation {
- if x != nil {
- return x.Operations
- }
- return nil
-}
-
-func (x *ListInstancePartitionOperationsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-func (x *ListInstancePartitionOperationsResponse) GetUnreachableInstancePartitions() []string {
- if x != nil {
- return x.UnreachableInstancePartitions
- }
- return nil
-}
-
-// The request for
-// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
-type MoveInstanceRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The instance to move.
- // Values are of the form `projects/<project>/instances/<instance>`.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The target instance configuration where to move the instance.
- // Values are of the form `projects/<project>/instanceConfigs/<config>`.
- TargetConfig string `protobuf:"bytes,2,opt,name=target_config,json=targetConfig,proto3" json:"target_config,omitempty"`
-}
-
-func (x *MoveInstanceRequest) Reset() {
- *x = MoveInstanceRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveInstanceRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveInstanceRequest) ProtoMessage() {}
-
-func (x *MoveInstanceRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveInstanceRequest.ProtoReflect.Descriptor instead.
-func (*MoveInstanceRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{33}
-}
-
-func (x *MoveInstanceRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *MoveInstanceRequest) GetTargetConfig() string {
- if x != nil {
- return x.TargetConfig
- }
- return ""
-}
-
-// The response for
-// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
-type MoveInstanceResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *MoveInstanceResponse) Reset() {
- *x = MoveInstanceResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveInstanceResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveInstanceResponse) ProtoMessage() {}
-
-func (x *MoveInstanceResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveInstanceResponse.ProtoReflect.Descriptor instead.
-func (*MoveInstanceResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{34}
-}
-
-// Metadata type for the operation returned by
-// [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance].
-type MoveInstanceMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The target instance configuration where to move the instance.
- // Values are of the form `projects/<project>/instanceConfigs/<config>`.
- TargetConfig string `protobuf:"bytes,1,opt,name=target_config,json=targetConfig,proto3" json:"target_config,omitempty"`
- // The progress of the
- // [MoveInstance][google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance]
- // operation.
- // [progress_percent][google.spanner.admin.instance.v1.OperationProgress.progress_percent]
- // is reset when cancellation is requested.
- Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"`
- // The time at which this operation was cancelled.
- CancelTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
-}
-
-func (x *MoveInstanceMetadata) Reset() {
- *x = MoveInstanceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveInstanceMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveInstanceMetadata) ProtoMessage() {}
-
-func (x *MoveInstanceMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveInstanceMetadata.ProtoReflect.Descriptor instead.
-func (*MoveInstanceMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{35}
-}
-
-func (x *MoveInstanceMetadata) GetTargetConfig() string {
- if x != nil {
- return x.TargetConfig
- }
- return ""
-}
-
-func (x *MoveInstanceMetadata) GetProgress() *OperationProgress {
- if x != nil {
- return x.Progress
- }
- return nil
-}
-
-func (x *MoveInstanceMetadata) GetCancelTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CancelTime
- }
- return nil
-}
-
-// The autoscaling limits for the instance. Users can define the minimum and
-// maximum compute capacity allocated to the instance, and the autoscaler will
-// only scale within that range. Users can either use nodes or processing
-// units to specify the limits, but should use the same unit to set both the
-// min_limit and max_limit.
-type AutoscalingConfig_AutoscalingLimits struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The minimum compute capacity for the instance.
- //
- // Types that are assignable to MinLimit:
- //
- // *AutoscalingConfig_AutoscalingLimits_MinNodes
- // *AutoscalingConfig_AutoscalingLimits_MinProcessingUnits
- MinLimit isAutoscalingConfig_AutoscalingLimits_MinLimit `protobuf_oneof:"min_limit"`
- // The maximum compute capacity for the instance. The maximum compute
- // capacity should be less than or equal to 10X the minimum compute
- // capacity.
- //
- // Types that are assignable to MaxLimit:
- //
- // *AutoscalingConfig_AutoscalingLimits_MaxNodes
- // *AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits
- MaxLimit isAutoscalingConfig_AutoscalingLimits_MaxLimit `protobuf_oneof:"max_limit"`
-}
-
-func (x *AutoscalingConfig_AutoscalingLimits) Reset() {
- *x = AutoscalingConfig_AutoscalingLimits{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AutoscalingConfig_AutoscalingLimits) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AutoscalingConfig_AutoscalingLimits) ProtoMessage() {}
-
-func (x *AutoscalingConfig_AutoscalingLimits) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AutoscalingConfig_AutoscalingLimits.ProtoReflect.Descriptor instead.
-func (*AutoscalingConfig_AutoscalingLimits) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2, 0}
-}
-
-func (m *AutoscalingConfig_AutoscalingLimits) GetMinLimit() isAutoscalingConfig_AutoscalingLimits_MinLimit {
- if m != nil {
- return m.MinLimit
- }
- return nil
-}
-
-func (x *AutoscalingConfig_AutoscalingLimits) GetMinNodes() int32 {
- if x, ok := x.GetMinLimit().(*AutoscalingConfig_AutoscalingLimits_MinNodes); ok {
- return x.MinNodes
- }
- return 0
-}
-
-func (x *AutoscalingConfig_AutoscalingLimits) GetMinProcessingUnits() int32 {
- if x, ok := x.GetMinLimit().(*AutoscalingConfig_AutoscalingLimits_MinProcessingUnits); ok {
- return x.MinProcessingUnits
- }
- return 0
-}
-
-func (m *AutoscalingConfig_AutoscalingLimits) GetMaxLimit() isAutoscalingConfig_AutoscalingLimits_MaxLimit {
- if m != nil {
- return m.MaxLimit
- }
- return nil
-}
-
-func (x *AutoscalingConfig_AutoscalingLimits) GetMaxNodes() int32 {
- if x, ok := x.GetMaxLimit().(*AutoscalingConfig_AutoscalingLimits_MaxNodes); ok {
- return x.MaxNodes
- }
- return 0
-}
-
-func (x *AutoscalingConfig_AutoscalingLimits) GetMaxProcessingUnits() int32 {
- if x, ok := x.GetMaxLimit().(*AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits); ok {
- return x.MaxProcessingUnits
- }
- return 0
-}
-
-type isAutoscalingConfig_AutoscalingLimits_MinLimit interface {
- isAutoscalingConfig_AutoscalingLimits_MinLimit()
-}
-
-type AutoscalingConfig_AutoscalingLimits_MinNodes struct {
- // Minimum number of nodes allocated to the instance. If set, this number
- // should be greater than or equal to 1.
- MinNodes int32 `protobuf:"varint,1,opt,name=min_nodes,json=minNodes,proto3,oneof"`
-}
-
-type AutoscalingConfig_AutoscalingLimits_MinProcessingUnits struct {
- // Minimum number of processing units allocated to the instance. If set,
- // this number should be multiples of 1000.
- MinProcessingUnits int32 `protobuf:"varint,2,opt,name=min_processing_units,json=minProcessingUnits,proto3,oneof"`
-}
-
-func (*AutoscalingConfig_AutoscalingLimits_MinNodes) isAutoscalingConfig_AutoscalingLimits_MinLimit() {
-}
-
-func (*AutoscalingConfig_AutoscalingLimits_MinProcessingUnits) isAutoscalingConfig_AutoscalingLimits_MinLimit() {
-}
-
-type isAutoscalingConfig_AutoscalingLimits_MaxLimit interface {
- isAutoscalingConfig_AutoscalingLimits_MaxLimit()
-}
-
-type AutoscalingConfig_AutoscalingLimits_MaxNodes struct {
- // Maximum number of nodes allocated to the instance. If set, this number
- // should be greater than or equal to min_nodes.
- MaxNodes int32 `protobuf:"varint,3,opt,name=max_nodes,json=maxNodes,proto3,oneof"`
-}
-
-type AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits struct {
- // Maximum number of processing units allocated to the instance. If set,
- // this number should be multiples of 1000 and be greater than or equal to
- // min_processing_units.
- MaxProcessingUnits int32 `protobuf:"varint,4,opt,name=max_processing_units,json=maxProcessingUnits,proto3,oneof"`
-}
-
-func (*AutoscalingConfig_AutoscalingLimits_MaxNodes) isAutoscalingConfig_AutoscalingLimits_MaxLimit() {
-}
-
-func (*AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits) isAutoscalingConfig_AutoscalingLimits_MaxLimit() {
-}
-
-// The autoscaling targets for an instance.
-type AutoscalingConfig_AutoscalingTargets struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The target high priority cpu utilization percentage that the
- // autoscaler should be trying to achieve for the instance. This number is
- // on a scale from 0 (no utilization) to 100 (full utilization). The valid
- // range is [10, 90] inclusive.
- HighPriorityCpuUtilizationPercent int32 `protobuf:"varint,1,opt,name=high_priority_cpu_utilization_percent,json=highPriorityCpuUtilizationPercent,proto3" json:"high_priority_cpu_utilization_percent,omitempty"`
- // Required. The target storage utilization percentage that the autoscaler
- // should be trying to achieve for the instance. This number is on a scale
- // from 0 (no utilization) to 100 (full utilization). The valid range is
- // [10, 100] inclusive.
- StorageUtilizationPercent int32 `protobuf:"varint,2,opt,name=storage_utilization_percent,json=storageUtilizationPercent,proto3" json:"storage_utilization_percent,omitempty"`
-}
-
-func (x *AutoscalingConfig_AutoscalingTargets) Reset() {
- *x = AutoscalingConfig_AutoscalingTargets{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AutoscalingConfig_AutoscalingTargets) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AutoscalingConfig_AutoscalingTargets) ProtoMessage() {}
-
-func (x *AutoscalingConfig_AutoscalingTargets) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AutoscalingConfig_AutoscalingTargets.ProtoReflect.Descriptor instead.
-func (*AutoscalingConfig_AutoscalingTargets) Descriptor() ([]byte, []int) {
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2, 1}
-}
-
-func (x *AutoscalingConfig_AutoscalingTargets) GetHighPriorityCpuUtilizationPercent() int32 {
- if x != nil {
- return x.HighPriorityCpuUtilizationPercent
- }
- return 0
-}
-
-func (x *AutoscalingConfig_AutoscalingTargets) GetStorageUtilizationPercent() int32 {
- if x != nil {
- return x.StorageUtilizationPercent
- }
- return 0
-}
-
-var File_google_spanner_admin_instance_v1_spanner_instance_admin_proto protoreflect.FileDescriptor
-
-var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc = []byte{
- 0x0a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2f,
- 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
- 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
- 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
- 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d,
- 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x02, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x4d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x39,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
- 0x36, 0x0a, 0x17, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6c, 0x65, 0x61, 0x64, 0x65,
- 0x72, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
- 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a,
- 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
- 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57,
- 0x49, 0x54, 0x4e, 0x45, 0x53, 0x53, 0x10, 0x03, 0x22, 0xc2, 0x07, 0x0a, 0x0e, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61,
- 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x49, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x49, 0x6e, 0x66, 0x6f,
- 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x5f, 0x0a, 0x11, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18,
- 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x4b, 0x0a, 0x0b, 0x62,
- 0x61, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x62, 0x61,
- 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x54, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
- 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x12,
- 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74,
- 0x61, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0b, 0x72, 0x65, 0x63,
- 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x63, 0x69, 0x6c, 0x69, 0x6e, 0x67,
- 0x12, 0x51, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x42,
- 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
- 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e,
- 0x47, 0x4f, 0x4f, 0x47, 0x4c, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44, 0x10, 0x01,
- 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x44,
- 0x10, 0x02, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53,
- 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
- 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01,
- 0x12, 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x60, 0xea, 0x41, 0x5d,
- 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x95, 0x05,
- 0x0a, 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x79, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69,
- 0x6e, 0x67, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67,
- 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x61, 0x75, 0x74,
- 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x7c,
- 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x61,
- 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41,
- 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63,
- 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x1a, 0xd3, 0x01, 0x0a,
- 0x11, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x4e, 0x6f, 0x64, 0x65,
- 0x73, 0x12, 0x32, 0x0a, 0x14, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
- 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x48,
- 0x00, 0x52, 0x12, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67,
- 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x6f, 0x64,
- 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x48, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x4e,
- 0x6f, 0x64, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x63,
- 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x05, 0x48, 0x01, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
- 0x69, 0x6e, 0x67, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f,
- 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x69, 0x6d,
- 0x69, 0x74, 0x1a, 0xb0, 0x01, 0x0a, 0x12, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69,
- 0x6e, 0x67, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x55, 0x0a, 0x25, 0x68, 0x69, 0x67,
- 0x68, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x75,
- 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x21, 0x68,
- 0x69, 0x67, 0x68, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x43, 0x70, 0x75, 0x55, 0x74,
- 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
- 0x12, 0x43, 0x0a, 0x1b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x75, 0x74, 0x69, 0x6c,
- 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x19, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x65,
- 0x72, 0x63, 0x65, 0x6e, 0x74, 0x22, 0xf8, 0x07, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x63,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64,
- 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x6f,
- 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09,
- 0x6e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f,
- 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x55,
- 0x6e, 0x69, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
- 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f,
- 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a,
- 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x6c, 0x61,
- 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6e,
- 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12,
- 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
- 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54,
- 0x69, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65,
- 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54,
- 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12,
- 0x09, 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x07, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c,
- 0x0a, 0x08, 0x53, 0x54, 0x41, 0x4e, 0x44, 0x41, 0x52, 0x44, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a,
- 0x45, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x52, 0x49, 0x53, 0x45, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f,
- 0x45, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x52, 0x49, 0x53, 0x45, 0x5f, 0x50, 0x4c, 0x55, 0x53, 0x10,
- 0x03, 0x3a, 0x4d, 0xea, 0x41, 0x4a, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d,
- 0x22, 0xa5, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67,
- 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x1b, 0x4c, 0x69, 0x73,
- 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
- 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5d, 0x0a,
- 0x18, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a,
- 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa2, 0x02, 0x0a,
- 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x12, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x69, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x5e, 0x0a, 0x0f,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d,
- 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c,
- 0x79, 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x5e, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
- 0x61, 0x73, 0x6b, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f,
- 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65,
- 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12,
- 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0xc6, 0x01, 0x0a, 0x23, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
- 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8d, 0x01,
- 0x0a, 0x24, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
- 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
- 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01,
- 0x0a, 0x12, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
- 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xd7, 0x01, 0x0a,
- 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72,
- 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x64, 0x12, 0x4b, 0x0a, 0x08, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x80, 0x02, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67,
- 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74,
- 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
- 0x12, 0x47, 0x0a, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x61,
- 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xab, 0x01, 0x0a, 0x15, 0x4c, 0x69,
- 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a,
- 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68,
- 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x6e, 0x72, 0x65,
- 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x4b, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x3e,
- 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x54,
- 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x84, 0x03, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12,
- 0x46, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12,
- 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65,
- 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x73, 0x0a, 0x1b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
- 0x65, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70,
- 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46,
- 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64,
- 0x52, 0x19, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c,
- 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0x84, 0x03, 0x0a, 0x16,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x46, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x39,
- 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e,
- 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x73, 0x0a,
- 0x1b, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x66, 0x69, 0x6c,
- 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e,
- 0x74, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x52, 0x19, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65,
- 0x64, 0x46, 0x75, 0x6c, 0x66, 0x69, 0x6c, 0x6c, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72, 0x69,
- 0x6f, 0x64, 0x22, 0x87, 0x02, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f,
- 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
- 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12,
- 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x87, 0x02, 0x0a,
- 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x59, 0x0a,
- 0x0f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67,
- 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52,
- 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e,
- 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x94, 0x06, 0x0a, 0x11, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x26, 0x0a, 0x0c,
- 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
- 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65,
- 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73,
- 0x69, 0x6e, 0x67, 0x5f, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x48,
- 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x55, 0x6e, 0x69,
- 0x74, 0x73, 0x12, 0x54, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x15,
- 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61,
- 0x62, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x14, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
- 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x0b, 0x20,
- 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
- 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x12, 0x0a, 0x04,
- 0x65, 0x74, 0x61, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67,
- 0x22, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41,
- 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
- 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x09,
- 0x0a, 0x05, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x02, 0x3a, 0x7e, 0xea, 0x41, 0x7b, 0x0a, 0x28,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x42, 0x12, 0x0a, 0x10, 0x63, 0x6f, 0x6d,
- 0x70, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x22, 0xb4, 0x02,
- 0x0a, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0x12, 0x62, 0x0a, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65,
- 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a,
- 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64,
- 0x54, 0x69, 0x6d, 0x65, 0x22, 0x83, 0x02, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a,
- 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49,
- 0x64, 0x12, 0x67, 0x0a, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7a, 0x0a, 0x1e, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x2a, 0x0a, 0x28, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x63, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x44, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x30, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2a, 0x0a, 0x28, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1e,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x67,
- 0x0a, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x11, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xb4, 0x02, 0x0a, 0x1f, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x62, 0x0a, 0x12, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x61,
- 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x61, 0x6e,
- 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xfd,
- 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
- 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x5f, 0x0a,
- 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x19, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0xd0,
- 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x64, 0x0a, 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
- 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c,
- 0x65, 0x22, 0xad, 0x02, 0x0a, 0x26, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41,
- 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a,
- 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61,
- 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x12, 0x5f, 0x0a, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x19, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e,
- 0x65, 0x22, 0xd8, 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a,
- 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72,
- 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f,
- 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54,
- 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x46, 0x0a, 0x1f, 0x75, 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61,
- 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1d, 0x75,
- 0x6e, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa6, 0x01, 0x0a,
- 0x13, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x52, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27,
- 0x0a, 0x25, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x16, 0x0a, 0x14, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc9, 0x01,
- 0x0a, 0x14, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x74,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, 0x0a, 0x08, 0x70,
- 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65,
- 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x3b, 0x0a, 0x0b,
- 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63,
- 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x32, 0xda, 0x27, 0x0a, 0x0d, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xcc, 0x01, 0x0a, 0x13,
- 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x12, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x38, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02,
- 0x29, 0x12, 0x27, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xb9, 0x01, 0x0a, 0x11, 0x47,
- 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x12, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x36,
- 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x12, 0x27, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc8, 0x02, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e,
- 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd1, 0x01,
- 0xca, 0x41, 0x70, 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0xda, 0x41, 0x29, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x69, 0x64, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x01, 0x2a, 0x22, 0x27, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x73, 0x12, 0xca, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd3, 0x01, 0xca, 0x41, 0x70, 0x0a, 0x2f,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41,
- 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93,
- 0x02, 0x3c, 0x3a, 0x01, 0x2a, 0x32, 0x37, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6e, 0x61, 0x6d, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa5,
- 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x36,
- 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x2a, 0x27, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xf0, 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xb4, 0x01, 0x0a, 0x0d, 0x4c, 0x69,
- 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
- 0x12, 0xe4, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
- 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47,
- 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12,
- 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xa1, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x30, 0xda, 0x41, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
- 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x9c, 0x02, 0x0a, 0x0e,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb1, 0x01, 0xca, 0x41, 0x64, 0x0a, 0x29, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0xda, 0x41, 0x1b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x22, 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x9d, 0x02, 0x0a, 0x0e, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb2, 0x01, 0xca, 0x41, 0x64, 0x0a, 0x29, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda,
- 0x41, 0x13, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x3a, 0x01, 0x2a, 0x32, 0x2a,
- 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x0e, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x37, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61,
- 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31,
- 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x30,
- 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x2a, 0x21, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
- 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69,
- 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4f, 0xda, 0x41,
- 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
- 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x93, 0x01,
- 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47,
- 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
- 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, 0xda, 0x41, 0x08, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x3a, 0x01, 0x2a, 0x22,
- 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61,
- 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x5a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72,
- 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x3a, 0x01,
- 0x2a, 0x22, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d,
- 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xd1, 0x01, 0x0a, 0x14,
- 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x45, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61,
- 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
- 0xe9, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d,
- 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69,
- 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xec, 0x01, 0xca,
- 0x41, 0x76, 0x0a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x2f, 0x70, 0x61, 0x72, 0x65, 0x6e,
- 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b,
- 0x3a, 0x01, 0x2a, 0x22, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xba, 0x01, 0x0a, 0x17,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
- 0x79, 0x22, 0x45, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38,
- 0x2a, 0x36, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
- 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xea, 0x02, 0x0a, 0x17, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xed, 0x01, 0xca, 0x41, 0x76, 0x0a, 0x32, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x40, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76,
- 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
- 0x61, 0xda, 0x41, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73,
- 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x3a, 0x01, 0x2a, 0x32, 0x49, 0x2f, 0x76, 0x31, 0x2f,
- 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x88, 0x02, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e,
- 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
- 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73,
- 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x50,
- 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x12,
- 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x89, 0x02, 0x0a, 0x0c, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa2, 0x01, 0xca, 0x41, 0x6e, 0x0a, 0x35, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
- 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x82, 0xd3, 0xe4, 0x93, 0x02,
- 0x2b, 0x3a, 0x01, 0x2a, 0x22, 0x26, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x76, 0x65, 0x1a, 0x78, 0xca, 0x41,
- 0x16, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x5c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
- 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d,
- 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
- 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x42, 0x8b, 0x02, 0x0a, 0x24, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x61, 0x64,
- 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42,
- 0x19, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x46, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
- 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
- 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x70, 0x62, 0x3b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x70, 0x62, 0xaa, 0x02, 0x26, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x41, 0x64, 0x6d, 0x69,
- 0x6e, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x26,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x49, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x2b, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
- 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a,
- 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
- 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescOnce sync.Once
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData = file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc
-)
-
-func file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP() []byte {
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescOnce.Do(func() {
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData)
- })
- return file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescData
-}
-
-var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
-var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 40)
-var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_goTypes = []any{
- (ReplicaInfo_ReplicaType)(0), // 0: google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType
- (InstanceConfig_Type)(0), // 1: google.spanner.admin.instance.v1.InstanceConfig.Type
- (InstanceConfig_State)(0), // 2: google.spanner.admin.instance.v1.InstanceConfig.State
- (Instance_State)(0), // 3: google.spanner.admin.instance.v1.Instance.State
- (Instance_Edition)(0), // 4: google.spanner.admin.instance.v1.Instance.Edition
- (InstancePartition_State)(0), // 5: google.spanner.admin.instance.v1.InstancePartition.State
- (*ReplicaInfo)(nil), // 6: google.spanner.admin.instance.v1.ReplicaInfo
- (*InstanceConfig)(nil), // 7: google.spanner.admin.instance.v1.InstanceConfig
- (*AutoscalingConfig)(nil), // 8: google.spanner.admin.instance.v1.AutoscalingConfig
- (*Instance)(nil), // 9: google.spanner.admin.instance.v1.Instance
- (*ListInstanceConfigsRequest)(nil), // 10: google.spanner.admin.instance.v1.ListInstanceConfigsRequest
- (*ListInstanceConfigsResponse)(nil), // 11: google.spanner.admin.instance.v1.ListInstanceConfigsResponse
- (*GetInstanceConfigRequest)(nil), // 12: google.spanner.admin.instance.v1.GetInstanceConfigRequest
- (*CreateInstanceConfigRequest)(nil), // 13: google.spanner.admin.instance.v1.CreateInstanceConfigRequest
- (*UpdateInstanceConfigRequest)(nil), // 14: google.spanner.admin.instance.v1.UpdateInstanceConfigRequest
- (*DeleteInstanceConfigRequest)(nil), // 15: google.spanner.admin.instance.v1.DeleteInstanceConfigRequest
- (*ListInstanceConfigOperationsRequest)(nil), // 16: google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest
- (*ListInstanceConfigOperationsResponse)(nil), // 17: google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse
- (*GetInstanceRequest)(nil), // 18: google.spanner.admin.instance.v1.GetInstanceRequest
- (*CreateInstanceRequest)(nil), // 19: google.spanner.admin.instance.v1.CreateInstanceRequest
- (*ListInstancesRequest)(nil), // 20: google.spanner.admin.instance.v1.ListInstancesRequest
- (*ListInstancesResponse)(nil), // 21: google.spanner.admin.instance.v1.ListInstancesResponse
- (*UpdateInstanceRequest)(nil), // 22: google.spanner.admin.instance.v1.UpdateInstanceRequest
- (*DeleteInstanceRequest)(nil), // 23: google.spanner.admin.instance.v1.DeleteInstanceRequest
- (*CreateInstanceMetadata)(nil), // 24: google.spanner.admin.instance.v1.CreateInstanceMetadata
- (*UpdateInstanceMetadata)(nil), // 25: google.spanner.admin.instance.v1.UpdateInstanceMetadata
- (*CreateInstanceConfigMetadata)(nil), // 26: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata
- (*UpdateInstanceConfigMetadata)(nil), // 27: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata
- (*InstancePartition)(nil), // 28: google.spanner.admin.instance.v1.InstancePartition
- (*CreateInstancePartitionMetadata)(nil), // 29: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata
- (*CreateInstancePartitionRequest)(nil), // 30: google.spanner.admin.instance.v1.CreateInstancePartitionRequest
- (*DeleteInstancePartitionRequest)(nil), // 31: google.spanner.admin.instance.v1.DeleteInstancePartitionRequest
- (*GetInstancePartitionRequest)(nil), // 32: google.spanner.admin.instance.v1.GetInstancePartitionRequest
- (*UpdateInstancePartitionRequest)(nil), // 33: google.spanner.admin.instance.v1.UpdateInstancePartitionRequest
- (*UpdateInstancePartitionMetadata)(nil), // 34: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata
- (*ListInstancePartitionsRequest)(nil), // 35: google.spanner.admin.instance.v1.ListInstancePartitionsRequest
- (*ListInstancePartitionsResponse)(nil), // 36: google.spanner.admin.instance.v1.ListInstancePartitionsResponse
- (*ListInstancePartitionOperationsRequest)(nil), // 37: google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest
- (*ListInstancePartitionOperationsResponse)(nil), // 38: google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse
- (*MoveInstanceRequest)(nil), // 39: google.spanner.admin.instance.v1.MoveInstanceRequest
- (*MoveInstanceResponse)(nil), // 40: google.spanner.admin.instance.v1.MoveInstanceResponse
- (*MoveInstanceMetadata)(nil), // 41: google.spanner.admin.instance.v1.MoveInstanceMetadata
- nil, // 42: google.spanner.admin.instance.v1.InstanceConfig.LabelsEntry
- (*AutoscalingConfig_AutoscalingLimits)(nil), // 43: google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits
- (*AutoscalingConfig_AutoscalingTargets)(nil), // 44: google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets
- nil, // 45: google.spanner.admin.instance.v1.Instance.LabelsEntry
- (*timestamppb.Timestamp)(nil), // 46: google.protobuf.Timestamp
- (*fieldmaskpb.FieldMask)(nil), // 47: google.protobuf.FieldMask
- (*longrunningpb.Operation)(nil), // 48: google.longrunning.Operation
- (FulfillmentPeriod)(0), // 49: google.spanner.admin.instance.v1.FulfillmentPeriod
- (*OperationProgress)(nil), // 50: google.spanner.admin.instance.v1.OperationProgress
- (*iampb.SetIamPolicyRequest)(nil), // 51: google.iam.v1.SetIamPolicyRequest
- (*iampb.GetIamPolicyRequest)(nil), // 52: google.iam.v1.GetIamPolicyRequest
- (*iampb.TestIamPermissionsRequest)(nil), // 53: google.iam.v1.TestIamPermissionsRequest
- (*emptypb.Empty)(nil), // 54: google.protobuf.Empty
- (*iampb.Policy)(nil), // 55: google.iam.v1.Policy
- (*iampb.TestIamPermissionsResponse)(nil), // 56: google.iam.v1.TestIamPermissionsResponse
-}
-var file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_depIdxs = []int32{
- 0, // 0: google.spanner.admin.instance.v1.ReplicaInfo.type:type_name -> google.spanner.admin.instance.v1.ReplicaInfo.ReplicaType
- 1, // 1: google.spanner.admin.instance.v1.InstanceConfig.config_type:type_name -> google.spanner.admin.instance.v1.InstanceConfig.Type
- 6, // 2: google.spanner.admin.instance.v1.InstanceConfig.replicas:type_name -> google.spanner.admin.instance.v1.ReplicaInfo
- 6, // 3: google.spanner.admin.instance.v1.InstanceConfig.optional_replicas:type_name -> google.spanner.admin.instance.v1.ReplicaInfo
- 42, // 4: google.spanner.admin.instance.v1.InstanceConfig.labels:type_name -> google.spanner.admin.instance.v1.InstanceConfig.LabelsEntry
- 2, // 5: google.spanner.admin.instance.v1.InstanceConfig.state:type_name -> google.spanner.admin.instance.v1.InstanceConfig.State
- 43, // 6: google.spanner.admin.instance.v1.AutoscalingConfig.autoscaling_limits:type_name -> google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingLimits
- 44, // 7: google.spanner.admin.instance.v1.AutoscalingConfig.autoscaling_targets:type_name -> google.spanner.admin.instance.v1.AutoscalingConfig.AutoscalingTargets
- 8, // 8: google.spanner.admin.instance.v1.Instance.autoscaling_config:type_name -> google.spanner.admin.instance.v1.AutoscalingConfig
- 3, // 9: google.spanner.admin.instance.v1.Instance.state:type_name -> google.spanner.admin.instance.v1.Instance.State
- 45, // 10: google.spanner.admin.instance.v1.Instance.labels:type_name -> google.spanner.admin.instance.v1.Instance.LabelsEntry
- 46, // 11: google.spanner.admin.instance.v1.Instance.create_time:type_name -> google.protobuf.Timestamp
- 46, // 12: google.spanner.admin.instance.v1.Instance.update_time:type_name -> google.protobuf.Timestamp
- 4, // 13: google.spanner.admin.instance.v1.Instance.edition:type_name -> google.spanner.admin.instance.v1.Instance.Edition
- 7, // 14: google.spanner.admin.instance.v1.ListInstanceConfigsResponse.instance_configs:type_name -> google.spanner.admin.instance.v1.InstanceConfig
- 7, // 15: google.spanner.admin.instance.v1.CreateInstanceConfigRequest.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
- 7, // 16: google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
- 47, // 17: google.spanner.admin.instance.v1.UpdateInstanceConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
- 48, // 18: google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse.operations:type_name -> google.longrunning.Operation
- 47, // 19: google.spanner.admin.instance.v1.GetInstanceRequest.field_mask:type_name -> google.protobuf.FieldMask
- 9, // 20: google.spanner.admin.instance.v1.CreateInstanceRequest.instance:type_name -> google.spanner.admin.instance.v1.Instance
- 46, // 21: google.spanner.admin.instance.v1.ListInstancesRequest.instance_deadline:type_name -> google.protobuf.Timestamp
- 9, // 22: google.spanner.admin.instance.v1.ListInstancesResponse.instances:type_name -> google.spanner.admin.instance.v1.Instance
- 9, // 23: google.spanner.admin.instance.v1.UpdateInstanceRequest.instance:type_name -> google.spanner.admin.instance.v1.Instance
- 47, // 24: google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask:type_name -> google.protobuf.FieldMask
- 9, // 25: google.spanner.admin.instance.v1.CreateInstanceMetadata.instance:type_name -> google.spanner.admin.instance.v1.Instance
- 46, // 26: google.spanner.admin.instance.v1.CreateInstanceMetadata.start_time:type_name -> google.protobuf.Timestamp
- 46, // 27: google.spanner.admin.instance.v1.CreateInstanceMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 46, // 28: google.spanner.admin.instance.v1.CreateInstanceMetadata.end_time:type_name -> google.protobuf.Timestamp
- 49, // 29: google.spanner.admin.instance.v1.CreateInstanceMetadata.expected_fulfillment_period:type_name -> google.spanner.admin.instance.v1.FulfillmentPeriod
- 9, // 30: google.spanner.admin.instance.v1.UpdateInstanceMetadata.instance:type_name -> google.spanner.admin.instance.v1.Instance
- 46, // 31: google.spanner.admin.instance.v1.UpdateInstanceMetadata.start_time:type_name -> google.protobuf.Timestamp
- 46, // 32: google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 46, // 33: google.spanner.admin.instance.v1.UpdateInstanceMetadata.end_time:type_name -> google.protobuf.Timestamp
- 49, // 34: google.spanner.admin.instance.v1.UpdateInstanceMetadata.expected_fulfillment_period:type_name -> google.spanner.admin.instance.v1.FulfillmentPeriod
- 7, // 35: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
- 50, // 36: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.progress:type_name -> google.spanner.admin.instance.v1.OperationProgress
- 46, // 37: google.spanner.admin.instance.v1.CreateInstanceConfigMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 7, // 38: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.instance_config:type_name -> google.spanner.admin.instance.v1.InstanceConfig
- 50, // 39: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.progress:type_name -> google.spanner.admin.instance.v1.OperationProgress
- 46, // 40: google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 5, // 41: google.spanner.admin.instance.v1.InstancePartition.state:type_name -> google.spanner.admin.instance.v1.InstancePartition.State
- 46, // 42: google.spanner.admin.instance.v1.InstancePartition.create_time:type_name -> google.protobuf.Timestamp
- 46, // 43: google.spanner.admin.instance.v1.InstancePartition.update_time:type_name -> google.protobuf.Timestamp
- 28, // 44: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
- 46, // 45: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.start_time:type_name -> google.protobuf.Timestamp
- 46, // 46: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 46, // 47: google.spanner.admin.instance.v1.CreateInstancePartitionMetadata.end_time:type_name -> google.protobuf.Timestamp
- 28, // 48: google.spanner.admin.instance.v1.CreateInstancePartitionRequest.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
- 28, // 49: google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
- 47, // 50: google.spanner.admin.instance.v1.UpdateInstancePartitionRequest.field_mask:type_name -> google.protobuf.FieldMask
- 28, // 51: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.instance_partition:type_name -> google.spanner.admin.instance.v1.InstancePartition
- 46, // 52: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.start_time:type_name -> google.protobuf.Timestamp
- 46, // 53: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 46, // 54: google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.end_time:type_name -> google.protobuf.Timestamp
- 46, // 55: google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline:type_name -> google.protobuf.Timestamp
- 28, // 56: google.spanner.admin.instance.v1.ListInstancePartitionsResponse.instance_partitions:type_name -> google.spanner.admin.instance.v1.InstancePartition
- 46, // 57: google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.instance_partition_deadline:type_name -> google.protobuf.Timestamp
- 48, // 58: google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.operations:type_name -> google.longrunning.Operation
- 50, // 59: google.spanner.admin.instance.v1.MoveInstanceMetadata.progress:type_name -> google.spanner.admin.instance.v1.OperationProgress
- 46, // 60: google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time:type_name -> google.protobuf.Timestamp
- 10, // 61: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs:input_type -> google.spanner.admin.instance.v1.ListInstanceConfigsRequest
- 12, // 62: google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig:input_type -> google.spanner.admin.instance.v1.GetInstanceConfigRequest
- 13, // 63: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig:input_type -> google.spanner.admin.instance.v1.CreateInstanceConfigRequest
- 14, // 64: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig:input_type -> google.spanner.admin.instance.v1.UpdateInstanceConfigRequest
- 15, // 65: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig:input_type -> google.spanner.admin.instance.v1.DeleteInstanceConfigRequest
- 16, // 66: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations:input_type -> google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest
- 20, // 67: google.spanner.admin.instance.v1.InstanceAdmin.ListInstances:input_type -> google.spanner.admin.instance.v1.ListInstancesRequest
- 35, // 68: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions:input_type -> google.spanner.admin.instance.v1.ListInstancePartitionsRequest
- 18, // 69: google.spanner.admin.instance.v1.InstanceAdmin.GetInstance:input_type -> google.spanner.admin.instance.v1.GetInstanceRequest
- 19, // 70: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance:input_type -> google.spanner.admin.instance.v1.CreateInstanceRequest
- 22, // 71: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance:input_type -> google.spanner.admin.instance.v1.UpdateInstanceRequest
- 23, // 72: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance:input_type -> google.spanner.admin.instance.v1.DeleteInstanceRequest
- 51, // 73: google.spanner.admin.instance.v1.InstanceAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
- 52, // 74: google.spanner.admin.instance.v1.InstanceAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
- 53, // 75: google.spanner.admin.instance.v1.InstanceAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
- 32, // 76: google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition:input_type -> google.spanner.admin.instance.v1.GetInstancePartitionRequest
- 30, // 77: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition:input_type -> google.spanner.admin.instance.v1.CreateInstancePartitionRequest
- 31, // 78: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition:input_type -> google.spanner.admin.instance.v1.DeleteInstancePartitionRequest
- 33, // 79: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition:input_type -> google.spanner.admin.instance.v1.UpdateInstancePartitionRequest
- 37, // 80: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations:input_type -> google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest
- 39, // 81: google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance:input_type -> google.spanner.admin.instance.v1.MoveInstanceRequest
- 11, // 82: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs:output_type -> google.spanner.admin.instance.v1.ListInstanceConfigsResponse
- 7, // 83: google.spanner.admin.instance.v1.InstanceAdmin.GetInstanceConfig:output_type -> google.spanner.admin.instance.v1.InstanceConfig
- 48, // 84: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig:output_type -> google.longrunning.Operation
- 48, // 85: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig:output_type -> google.longrunning.Operation
- 54, // 86: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig:output_type -> google.protobuf.Empty
- 17, // 87: google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations:output_type -> google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse
- 21, // 88: google.spanner.admin.instance.v1.InstanceAdmin.ListInstances:output_type -> google.spanner.admin.instance.v1.ListInstancesResponse
- 36, // 89: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions:output_type -> google.spanner.admin.instance.v1.ListInstancePartitionsResponse
- 9, // 90: google.spanner.admin.instance.v1.InstanceAdmin.GetInstance:output_type -> google.spanner.admin.instance.v1.Instance
- 48, // 91: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance:output_type -> google.longrunning.Operation
- 48, // 92: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance:output_type -> google.longrunning.Operation
- 54, // 93: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstance:output_type -> google.protobuf.Empty
- 55, // 94: google.spanner.admin.instance.v1.InstanceAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy
- 55, // 95: google.spanner.admin.instance.v1.InstanceAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy
- 56, // 96: google.spanner.admin.instance.v1.InstanceAdmin.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
- 28, // 97: google.spanner.admin.instance.v1.InstanceAdmin.GetInstancePartition:output_type -> google.spanner.admin.instance.v1.InstancePartition
- 48, // 98: google.spanner.admin.instance.v1.InstanceAdmin.CreateInstancePartition:output_type -> google.longrunning.Operation
- 54, // 99: google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstancePartition:output_type -> google.protobuf.Empty
- 48, // 100: google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstancePartition:output_type -> google.longrunning.Operation
- 38, // 101: google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations:output_type -> google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse
- 48, // 102: google.spanner.admin.instance.v1.InstanceAdmin.MoveInstance:output_type -> google.longrunning.Operation
- 82, // [82:103] is the sub-list for method output_type
- 61, // [61:82] is the sub-list for method input_type
- 61, // [61:61] is the sub-list for extension type_name
- 61, // [61:61] is the sub-list for extension extendee
- 0, // [0:61] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_init() }
-func file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_init() {
- if File_google_spanner_admin_instance_v1_spanner_instance_admin_proto != nil {
- return
- }
- file_google_spanner_admin_instance_v1_common_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*ReplicaInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*InstanceConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*AutoscalingConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Instance); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstanceConfigsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstanceConfigsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*GetInstanceConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstanceConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstanceConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteInstanceConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstanceConfigOperationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstanceConfigOperationsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*GetInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancesRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancesResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstanceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstanceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstanceConfigMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstanceConfigMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*InstancePartition); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstancePartitionMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[24].Exporter = func(v any, i int) any {
- switch v := v.(*CreateInstancePartitionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteInstancePartitionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*GetInstancePartitionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstancePartitionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*UpdateInstancePartitionMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[29].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancePartitionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancePartitionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[31].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancePartitionOperationsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[32].Exporter = func(v any, i int) any {
- switch v := v.(*ListInstancePartitionOperationsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[33].Exporter = func(v any, i int) any {
- switch v := v.(*MoveInstanceRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[34].Exporter = func(v any, i int) any {
- switch v := v.(*MoveInstanceResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[35].Exporter = func(v any, i int) any {
- switch v := v.(*MoveInstanceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37].Exporter = func(v any, i int) any {
- switch v := v.(*AutoscalingConfig_AutoscalingLimits); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[38].Exporter = func(v any, i int) any {
- switch v := v.(*AutoscalingConfig_AutoscalingTargets); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[22].OneofWrappers = []any{
- (*InstancePartition_NodeCount)(nil),
- (*InstancePartition_ProcessingUnits)(nil),
- }
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes[37].OneofWrappers = []any{
- (*AutoscalingConfig_AutoscalingLimits_MinNodes)(nil),
- (*AutoscalingConfig_AutoscalingLimits_MinProcessingUnits)(nil),
- (*AutoscalingConfig_AutoscalingLimits_MaxNodes)(nil),
- (*AutoscalingConfig_AutoscalingLimits_MaxProcessingUnits)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc,
- NumEnums: 6,
- NumMessages: 40,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_goTypes,
- DependencyIndexes: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_depIdxs,
- EnumInfos: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_enumTypes,
- MessageInfos: file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_msgTypes,
- }.Build()
- File_google_spanner_admin_instance_v1_spanner_instance_admin_proto = out.File
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDesc = nil
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_goTypes = nil
- file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// InstanceAdminClient is the client API for InstanceAdmin service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type InstanceAdminClient interface {
- // Lists the supported instance configurations for a given project.
- ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error)
- // Gets information about a particular instance configuration.
- GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error)
- // Creates an instance configuration and begins preparing it to be used. The
- // returned [long-running operation][google.longrunning.Operation]
- // can be used to track the progress of preparing the new
- // instance configuration. The instance configuration name is assigned by the
- // caller. If the named instance configuration already exists,
- // `CreateInstanceConfig` returns `ALREADY_EXISTS`.
- //
- // Immediately after the request returns:
- //
- // - The instance configuration is readable via the API, with all requested
- // attributes. The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field is set to true. Its state is `CREATING`.
- //
- // While the operation is pending:
- //
- // - Cancelling the operation renders the instance configuration immediately
- // unreadable via the API.
- // - Except for deleting the creating resource, all other attempts to modify
- // the instance configuration are rejected.
- //
- // Upon completion of the returned operation:
- //
- // - Instances can be created using the instance configuration.
- // - The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field becomes false. Its state becomes `READY`.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_config_name>/operations/<operation_id>` and can be used to track
- // creation of the instance configuration. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
- // successful.
- //
- // Authorization requires `spanner.instanceConfigs.create` permission on
- // the resource
- // [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
- CreateInstanceConfig(ctx context.Context, in *CreateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Updates an instance configuration. The returned
- // [long-running operation][google.longrunning.Operation] can be used to track
- // the progress of updating the instance. If the named instance configuration
- // does not exist, returns `NOT_FOUND`.
- //
- // Only user-managed configurations can be updated.
- //
- // Immediately after the request returns:
- //
- // - The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field is set to true.
- //
- // While the operation is pending:
- //
- // - Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
- // The operation is guaranteed to succeed at undoing all changes, after
- // which point it terminates with a `CANCELLED` status.
- // - All other attempts to modify the instance configuration are rejected.
- // - Reading the instance configuration via the API continues to give the
- // pre-request values.
- //
- // Upon completion of the returned operation:
- //
- // - Creating instances using the instance configuration uses the new
- // values.
- // - The new values of the instance configuration are readable via the API.
- // - The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field becomes false.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_config_name>/operations/<operation_id>` and can be used to track
- // the instance configuration modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
- // successful.
- //
- // Authorization requires `spanner.instanceConfigs.update` permission on
- // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
- UpdateInstanceConfig(ctx context.Context, in *UpdateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Deletes the instance configuration. Deletion is only allowed when no
- // instances are using the configuration. If any instances are using
- // the configuration, returns `FAILED_PRECONDITION`.
- //
- // Only user-managed configurations can be deleted.
- //
- // Authorization requires `spanner.instanceConfigs.delete` permission on
- // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
- DeleteInstanceConfig(ctx context.Context, in *DeleteInstanceConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Lists the user-managed instance configuration [long-running
- // operations][google.longrunning.Operation] in the given project. An instance
- // configuration operation has a name of the form
- // `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.start_time` in descending order starting
- // from the most recently started operation.
- ListInstanceConfigOperations(ctx context.Context, in *ListInstanceConfigOperationsRequest, opts ...grpc.CallOption) (*ListInstanceConfigOperationsResponse, error)
- // Lists all instances in the given project.
- ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error)
- // Lists all instance partitions for the given instance.
- ListInstancePartitions(ctx context.Context, in *ListInstancePartitionsRequest, opts ...grpc.CallOption) (*ListInstancePartitionsResponse, error)
- // Gets information about a particular instance.
- GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error)
- // Creates an instance and begins preparing it to begin serving. The
- // returned [long-running operation][google.longrunning.Operation]
- // can be used to track the progress of preparing the new
- // instance. The instance name is assigned by the caller. If the
- // named instance already exists, `CreateInstance` returns
- // `ALREADY_EXISTS`.
- //
- // Immediately upon completion of this request:
- //
- // - The instance is readable via the API, with all requested attributes
- // but no allocated resources. Its state is `CREATING`.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation renders the instance immediately unreadable
- // via the API.
- // - The instance can be deleted.
- // - All other attempts to modify the instance are rejected.
- //
- // Upon completion of the returned operation:
- //
- // - Billing for all successfully-allocated resources begins (some types
- // may have lower than the requested levels).
- // - Databases can be created in the instance.
- // - The instance's allocated resource levels are readable via the API.
- // - The instance's state becomes `READY`.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `<instance_name>/operations/<operation_id>` and
- // can be used to track creation of the instance. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
- CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Updates an instance, and begins allocating or releasing resources
- // as requested. The returned [long-running
- // operation][google.longrunning.Operation] can be used to track the
- // progress of updating the instance. If the named instance does not
- // exist, returns `NOT_FOUND`.
- //
- // Immediately upon completion of this request:
- //
- // - For resource types for which a decrease in the instance's allocation
- // has been requested, billing is based on the newly-requested level.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
- // and begins restoring resources to their pre-request values. The
- // operation is guaranteed to succeed at undoing all resource changes,
- // after which point it terminates with a `CANCELLED` status.
- // - All other attempts to modify the instance are rejected.
- // - Reading the instance via the API continues to give the pre-request
- // resource levels.
- //
- // Upon completion of the returned operation:
- //
- // - Billing begins for all successfully-allocated resources (some types
- // may have lower than the requested levels).
- // - All newly-reserved resources are available for serving the instance's
- // tables.
- // - The instance's new resource levels are readable via the API.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `<instance_name>/operations/<operation_id>` and
- // can be used to track the instance modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
- //
- // Authorization requires `spanner.instances.update` permission on
- // the resource [name][google.spanner.admin.instance.v1.Instance.name].
- UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Deletes an instance.
- //
- // Immediately upon completion of the request:
- //
- // - Billing ceases for all of the instance's reserved resources.
- //
- // Soon afterward:
- //
- // - The instance and *all of its databases* immediately and
- // irrevocably disappear from the API. All data in the databases
- // is permanently deleted.
- DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Sets the access control policy on an instance resource. Replaces any
- // existing policy.
- //
- // Authorization requires `spanner.instances.setIamPolicy` on
- // [resource][google.iam.v1.SetIamPolicyRequest.resource].
- SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Gets the access control policy for an instance resource. Returns an empty
- // policy if an instance exists but does not have a policy set.
- //
- // Authorization requires `spanner.instances.getIamPolicy` on
- // [resource][google.iam.v1.GetIamPolicyRequest.resource].
- GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Returns permissions that the caller has on the specified instance resource.
- //
- // Attempting this RPC on a non-existent Cloud Spanner instance resource will
- // result in a NOT_FOUND error if the user has `spanner.instances.list`
- // permission on the containing Google Cloud Project. Otherwise returns an
- // empty set of permissions.
- TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
- // Gets information about a particular instance partition.
- GetInstancePartition(ctx context.Context, in *GetInstancePartitionRequest, opts ...grpc.CallOption) (*InstancePartition, error)
- // Creates an instance partition and begins preparing it to be used. The
- // returned [long-running operation][google.longrunning.Operation]
- // can be used to track the progress of preparing the new instance partition.
- // The instance partition name is assigned by the caller. If the named
- // instance partition already exists, `CreateInstancePartition` returns
- // `ALREADY_EXISTS`.
- //
- // Immediately upon completion of this request:
- //
- // - The instance partition is readable via the API, with all requested
- // attributes but no allocated resources. Its state is `CREATING`.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation renders the instance partition immediately
- // unreadable via the API.
- // - The instance partition can be deleted.
- // - All other attempts to modify the instance partition are rejected.
- //
- // Upon completion of the returned operation:
- //
- // - Billing for all successfully-allocated resources begins (some types
- // may have lower than the requested levels).
- // - Databases can start using this instance partition.
- // - The instance partition's allocated resource levels are readable via the
- // API.
- // - The instance partition's state becomes `READY`.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_partition_name>/operations/<operation_id>` and can be used to
- // track creation of the instance partition. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
- // successful.
- CreateInstancePartition(ctx context.Context, in *CreateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Deletes an existing instance partition. Requires that the
- // instance partition is not used by any database or backup and is not the
- // default instance partition of an instance.
- //
- // Authorization requires `spanner.instancePartitions.delete` permission on
- // the resource
- // [name][google.spanner.admin.instance.v1.InstancePartition.name].
- DeleteInstancePartition(ctx context.Context, in *DeleteInstancePartitionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Updates an instance partition, and begins allocating or releasing resources
- // as requested. The returned [long-running
- // operation][google.longrunning.Operation] can be used to track the
- // progress of updating the instance partition. If the named instance
- // partition does not exist, returns `NOT_FOUND`.
- //
- // Immediately upon completion of this request:
- //
- // - For resource types for which a decrease in the instance partition's
- // allocation has been requested, billing is based on the newly-requested
- // level.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
- // and begins restoring resources to their pre-request values. The
- // operation is guaranteed to succeed at undoing all resource changes,
- // after which point it terminates with a `CANCELLED` status.
- // - All other attempts to modify the instance partition are rejected.
- // - Reading the instance partition via the API continues to give the
- // pre-request resource levels.
- //
- // Upon completion of the returned operation:
- //
- // - Billing begins for all successfully-allocated resources (some types
- // may have lower than the requested levels).
- // - All newly-reserved resources are available for serving the instance
- // partition's tables.
- // - The instance partition's new resource levels are readable via the API.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_partition_name>/operations/<operation_id>` and can be used to
- // track the instance partition modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
- // successful.
- //
- // Authorization requires `spanner.instancePartitions.update` permission on
- // the resource
- // [name][google.spanner.admin.instance.v1.InstancePartition.name].
- UpdateInstancePartition(ctx context.Context, in *UpdateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
- // Lists instance partition [long-running
- // operations][google.longrunning.Operation] in the given instance.
- // An instance partition operation has a name of the form
- // `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.start_time` in descending order starting from the
- // most recently started operation.
- //
- // Authorization requires `spanner.instancePartitionOperations.list`
- // permission on the resource
- // [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
- ListInstancePartitionOperations(ctx context.Context, in *ListInstancePartitionOperationsRequest, opts ...grpc.CallOption) (*ListInstancePartitionOperationsResponse, error)
- // Moves an instance to the target instance configuration. You can use the
- // returned [long-running operation][google.longrunning.Operation] to track
- // the progress of moving the instance.
- //
- // `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
- // the following criteria:
- //
- // - Is undergoing a move to a different instance configuration
- // - Has backups
- // - Has an ongoing update
- // - Contains any CMEK-enabled databases
- // - Is a free trial instance
- //
- // While the operation is pending:
- //
- // - All other attempts to modify the instance, including changes to its
- // compute capacity, are rejected.
- //
- // - The following database and backup admin operations are rejected:
- //
- // - `DatabaseAdmin.CreateDatabase`
- //
- // - `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
- // specified in the request.)
- //
- // - `DatabaseAdmin.RestoreDatabase`
- //
- // - `DatabaseAdmin.CreateBackup`
- //
- // - `DatabaseAdmin.CopyBackup`
- //
- // - Both the source and target instance configurations are subject to
- // hourly compute and storage charges.
- //
- // - The instance might experience higher read-write latencies and a higher
- // transaction abort rate. However, moving an instance doesn't cause any
- // downtime.
- //
- // The returned [long-running operation][google.longrunning.Operation] has
- // a name of the format
- // `<instance_name>/operations/<operation_id>` and can be used to track
- // the move instance operation. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Instance][google.spanner.admin.instance.v1.Instance],
- // if successful.
- // Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
- // Cancellation is not immediate because it involves moving any data
- // previously moved to the target instance configuration back to the original
- // instance configuration. You can use this operation to track the progress of
- // the cancellation. Upon successful completion of the cancellation, the
- // operation terminates with `CANCELLED` status.
- //
- // If not cancelled, upon completion of the returned operation:
- //
- // - The instance successfully moves to the target instance
- // configuration.
- // - You are billed for compute and storage in target instance
- // configuration.
- //
- // Authorization requires the `spanner.instances.update` permission on
- // the resource [instance][google.spanner.admin.instance.v1.Instance].
- //
- // For more details, see
- // [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
- MoveInstance(ctx context.Context, in *MoveInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
-}
-
-type instanceAdminClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewInstanceAdminClient(cc grpc.ClientConnInterface) InstanceAdminClient {
- return &instanceAdminClient{cc}
-}
-
-func (c *instanceAdminClient) ListInstanceConfigs(ctx context.Context, in *ListInstanceConfigsRequest, opts ...grpc.CallOption) (*ListInstanceConfigsResponse, error) {
- out := new(ListInstanceConfigsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) GetInstanceConfig(ctx context.Context, in *GetInstanceConfigRequest, opts ...grpc.CallOption) (*InstanceConfig, error) {
- out := new(InstanceConfig)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) CreateInstanceConfig(ctx context.Context, in *CreateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) UpdateInstanceConfig(ctx context.Context, in *UpdateInstanceConfigRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) DeleteInstanceConfig(ctx context.Context, in *DeleteInstanceConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) ListInstanceConfigOperations(ctx context.Context, in *ListInstanceConfigOperationsRequest, opts ...grpc.CallOption) (*ListInstanceConfigOperationsResponse, error) {
- out := new(ListInstanceConfigOperationsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) ListInstances(ctx context.Context, in *ListInstancesRequest, opts ...grpc.CallOption) (*ListInstancesResponse, error) {
- out := new(ListInstancesResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) ListInstancePartitions(ctx context.Context, in *ListInstancePartitionsRequest, opts ...grpc.CallOption) (*ListInstancePartitionsResponse, error) {
- out := new(ListInstancePartitionsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) GetInstance(ctx context.Context, in *GetInstanceRequest, opts ...grpc.CallOption) (*Instance, error) {
- out := new(Instance)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) CreateInstance(ctx context.Context, in *CreateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) UpdateInstance(ctx context.Context, in *UpdateInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) DeleteInstance(ctx context.Context, in *DeleteInstanceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- out := new(iampb.TestIamPermissionsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) GetInstancePartition(ctx context.Context, in *GetInstancePartitionRequest, opts ...grpc.CallOption) (*InstancePartition, error) {
- out := new(InstancePartition)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) CreateInstancePartition(ctx context.Context, in *CreateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) DeleteInstancePartition(ctx context.Context, in *DeleteInstancePartitionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) UpdateInstancePartition(ctx context.Context, in *UpdateInstancePartitionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) ListInstancePartitionOperations(ctx context.Context, in *ListInstancePartitionOperationsRequest, opts ...grpc.CallOption) (*ListInstancePartitionOperationsResponse, error) {
- out := new(ListInstancePartitionOperationsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *instanceAdminClient) MoveInstance(ctx context.Context, in *MoveInstanceRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error) {
- out := new(longrunningpb.Operation)
- err := c.cc.Invoke(ctx, "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// InstanceAdminServer is the server API for InstanceAdmin service.
-type InstanceAdminServer interface {
- // Lists the supported instance configurations for a given project.
- ListInstanceConfigs(context.Context, *ListInstanceConfigsRequest) (*ListInstanceConfigsResponse, error)
- // Gets information about a particular instance configuration.
- GetInstanceConfig(context.Context, *GetInstanceConfigRequest) (*InstanceConfig, error)
- // Creates an instance configuration and begins preparing it to be used. The
- // returned [long-running operation][google.longrunning.Operation]
- // can be used to track the progress of preparing the new
- // instance configuration. The instance configuration name is assigned by the
- // caller. If the named instance configuration already exists,
- // `CreateInstanceConfig` returns `ALREADY_EXISTS`.
- //
- // Immediately after the request returns:
- //
- // - The instance configuration is readable via the API, with all requested
- // attributes. The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field is set to true. Its state is `CREATING`.
- //
- // While the operation is pending:
- //
- // - Cancelling the operation renders the instance configuration immediately
- // unreadable via the API.
- // - Except for deleting the creating resource, all other attempts to modify
- // the instance configuration are rejected.
- //
- // Upon completion of the returned operation:
- //
- // - Instances can be created using the instance configuration.
- // - The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field becomes false. Its state becomes `READY`.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_config_name>/operations/<operation_id>` and can be used to track
- // creation of the instance configuration. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
- // successful.
- //
- // Authorization requires `spanner.instanceConfigs.create` permission on
- // the resource
- // [parent][google.spanner.admin.instance.v1.CreateInstanceConfigRequest.parent].
- CreateInstanceConfig(context.Context, *CreateInstanceConfigRequest) (*longrunningpb.Operation, error)
- // Updates an instance configuration. The returned
- // [long-running operation][google.longrunning.Operation] can be used to track
- // the progress of updating the instance. If the named instance configuration
- // does not exist, returns `NOT_FOUND`.
- //
- // Only user-managed configurations can be updated.
- //
- // Immediately after the request returns:
- //
- // - The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field is set to true.
- //
- // While the operation is pending:
- //
- // - Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata.cancel_time].
- // The operation is guaranteed to succeed at undoing all changes, after
- // which point it terminates with a `CANCELLED` status.
- // - All other attempts to modify the instance configuration are rejected.
- // - Reading the instance configuration via the API continues to give the
- // pre-request values.
- //
- // Upon completion of the returned operation:
- //
- // - Creating instances using the instance configuration uses the new
- // values.
- // - The new values of the instance configuration are readable via the API.
- // - The instance configuration's
- // [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling]
- // field becomes false.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_config_name>/operations/<operation_id>` and can be used to track
- // the instance configuration modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if
- // successful.
- //
- // Authorization requires `spanner.instanceConfigs.update` permission on
- // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
- UpdateInstanceConfig(context.Context, *UpdateInstanceConfigRequest) (*longrunningpb.Operation, error)
- // Deletes the instance configuration. Deletion is only allowed when no
- // instances are using the configuration. If any instances are using
- // the configuration, returns `FAILED_PRECONDITION`.
- //
- // Only user-managed configurations can be deleted.
- //
- // Authorization requires `spanner.instanceConfigs.delete` permission on
- // the resource [name][google.spanner.admin.instance.v1.InstanceConfig.name].
- DeleteInstanceConfig(context.Context, *DeleteInstanceConfigRequest) (*emptypb.Empty, error)
- // Lists the user-managed instance configuration [long-running
- // operations][google.longrunning.Operation] in the given project. An instance
- // configuration operation has a name of the form
- // `projects/<project>/instanceConfigs/<instance_config>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.start_time` in descending order starting
- // from the most recently started operation.
- ListInstanceConfigOperations(context.Context, *ListInstanceConfigOperationsRequest) (*ListInstanceConfigOperationsResponse, error)
- // Lists all instances in the given project.
- ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error)
- // Lists all instance partitions for the given instance.
- ListInstancePartitions(context.Context, *ListInstancePartitionsRequest) (*ListInstancePartitionsResponse, error)
- // Gets information about a particular instance.
- GetInstance(context.Context, *GetInstanceRequest) (*Instance, error)
- // Creates an instance and begins preparing it to begin serving. The
- // returned [long-running operation][google.longrunning.Operation]
- // can be used to track the progress of preparing the new
- // instance. The instance name is assigned by the caller. If the
- // named instance already exists, `CreateInstance` returns
- // `ALREADY_EXISTS`.
- //
- // Immediately upon completion of this request:
- //
- // - The instance is readable via the API, with all requested attributes
- // but no allocated resources. Its state is `CREATING`.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation renders the instance immediately unreadable
- // via the API.
- // - The instance can be deleted.
- // - All other attempts to modify the instance are rejected.
- //
- // Upon completion of the returned operation:
- //
- // - Billing for all successfully-allocated resources begins (some types
- // may have lower than the requested levels).
- // - Databases can be created in the instance.
- // - The instance's allocated resource levels are readable via the API.
- // - The instance's state becomes `READY`.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `<instance_name>/operations/<operation_id>` and
- // can be used to track creation of the instance. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
- CreateInstance(context.Context, *CreateInstanceRequest) (*longrunningpb.Operation, error)
- // Updates an instance, and begins allocating or releasing resources
- // as requested. The returned [long-running
- // operation][google.longrunning.Operation] can be used to track the
- // progress of updating the instance. If the named instance does not
- // exist, returns `NOT_FOUND`.
- //
- // Immediately upon completion of this request:
- //
- // - For resource types for which a decrease in the instance's allocation
- // has been requested, billing is based on the newly-requested level.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time],
- // and begins restoring resources to their pre-request values. The
- // operation is guaranteed to succeed at undoing all resource changes,
- // after which point it terminates with a `CANCELLED` status.
- // - All other attempts to modify the instance are rejected.
- // - Reading the instance via the API continues to give the pre-request
- // resource levels.
- //
- // Upon completion of the returned operation:
- //
- // - Billing begins for all successfully-allocated resources (some types
- // may have lower than the requested levels).
- // - All newly-reserved resources are available for serving the instance's
- // tables.
- // - The instance's new resource levels are readable via the API.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format `<instance_name>/operations/<operation_id>` and
- // can be used to track the instance modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Instance][google.spanner.admin.instance.v1.Instance], if successful.
- //
- // Authorization requires `spanner.instances.update` permission on
- // the resource [name][google.spanner.admin.instance.v1.Instance.name].
- UpdateInstance(context.Context, *UpdateInstanceRequest) (*longrunningpb.Operation, error)
- // Deletes an instance.
- //
- // Immediately upon completion of the request:
- //
- // - Billing ceases for all of the instance's reserved resources.
- //
- // Soon afterward:
- //
- // - The instance and *all of its databases* immediately and
- // irrevocably disappear from the API. All data in the databases
- // is permanently deleted.
- DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error)
- // Sets the access control policy on an instance resource. Replaces any
- // existing policy.
- //
- // Authorization requires `spanner.instances.setIamPolicy` on
- // [resource][google.iam.v1.SetIamPolicyRequest.resource].
- SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
- // Gets the access control policy for an instance resource. Returns an empty
- // policy if an instance exists but does not have a policy set.
- //
- // Authorization requires `spanner.instances.getIamPolicy` on
- // [resource][google.iam.v1.GetIamPolicyRequest.resource].
- GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error)
- // Returns permissions that the caller has on the specified instance resource.
- //
- // Attempting this RPC on a non-existent Cloud Spanner instance resource will
- // result in a NOT_FOUND error if the user has `spanner.instances.list`
- // permission on the containing Google Cloud Project. Otherwise returns an
- // empty set of permissions.
- TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
- // Gets information about a particular instance partition.
- GetInstancePartition(context.Context, *GetInstancePartitionRequest) (*InstancePartition, error)
- // Creates an instance partition and begins preparing it to be used. The
- // returned [long-running operation][google.longrunning.Operation]
- // can be used to track the progress of preparing the new instance partition.
- // The instance partition name is assigned by the caller. If the named
- // instance partition already exists, `CreateInstancePartition` returns
- // `ALREADY_EXISTS`.
- //
- // Immediately upon completion of this request:
- //
- // - The instance partition is readable via the API, with all requested
- // attributes but no allocated resources. Its state is `CREATING`.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation renders the instance partition immediately
- // unreadable via the API.
- // - The instance partition can be deleted.
- // - All other attempts to modify the instance partition are rejected.
- //
- // Upon completion of the returned operation:
- //
- // - Billing for all successfully-allocated resources begins (some types
- // may have lower than the requested levels).
- // - Databases can start using this instance partition.
- // - The instance partition's allocated resource levels are readable via the
- // API.
- // - The instance partition's state becomes `READY`.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_partition_name>/operations/<operation_id>` and can be used to
- // track creation of the instance partition. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
- // successful.
- CreateInstancePartition(context.Context, *CreateInstancePartitionRequest) (*longrunningpb.Operation, error)
- // Deletes an existing instance partition. Requires that the
- // instance partition is not used by any database or backup and is not the
- // default instance partition of an instance.
- //
- // Authorization requires `spanner.instancePartitions.delete` permission on
- // the resource
- // [name][google.spanner.admin.instance.v1.InstancePartition.name].
- DeleteInstancePartition(context.Context, *DeleteInstancePartitionRequest) (*emptypb.Empty, error)
- // Updates an instance partition, and begins allocating or releasing resources
- // as requested. The returned [long-running
- // operation][google.longrunning.Operation] can be used to track the
- // progress of updating the instance partition. If the named instance
- // partition does not exist, returns `NOT_FOUND`.
- //
- // Immediately upon completion of this request:
- //
- // - For resource types for which a decrease in the instance partition's
- // allocation has been requested, billing is based on the newly-requested
- // level.
- //
- // Until completion of the returned operation:
- //
- // - Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata.cancel_time],
- // and begins restoring resources to their pre-request values. The
- // operation is guaranteed to succeed at undoing all resource changes,
- // after which point it terminates with a `CANCELLED` status.
- // - All other attempts to modify the instance partition are rejected.
- // - Reading the instance partition via the API continues to give the
- // pre-request resource levels.
- //
- // Upon completion of the returned operation:
- //
- // - Billing begins for all successfully-allocated resources (some types
- // may have lower than the requested levels).
- // - All newly-reserved resources are available for serving the instance
- // partition's tables.
- // - The instance partition's new resource levels are readable via the API.
- //
- // The returned [long-running operation][google.longrunning.Operation] will
- // have a name of the format
- // `<instance_partition_name>/operations/<operation_id>` and can be used to
- // track the instance partition modification. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if
- // successful.
- //
- // Authorization requires `spanner.instancePartitions.update` permission on
- // the resource
- // [name][google.spanner.admin.instance.v1.InstancePartition.name].
- UpdateInstancePartition(context.Context, *UpdateInstancePartitionRequest) (*longrunningpb.Operation, error)
- // Lists instance partition [long-running
- // operations][google.longrunning.Operation] in the given instance.
- // An instance partition operation has a name of the form
- // `projects/<project>/instances/<instance>/instancePartitions/<instance_partition>/operations/<operation>`.
- // The long-running operation
- // [metadata][google.longrunning.Operation.metadata] field type
- // `metadata.type_url` describes the type of the metadata. Operations returned
- // include those that have completed/failed/canceled within the last 7 days,
- // and pending operations. Operations returned are ordered by
- // `operation.metadata.value.start_time` in descending order starting from the
- // most recently started operation.
- //
- // Authorization requires `spanner.instancePartitionOperations.list`
- // permission on the resource
- // [parent][google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest.parent].
- ListInstancePartitionOperations(context.Context, *ListInstancePartitionOperationsRequest) (*ListInstancePartitionOperationsResponse, error)
- // Moves an instance to the target instance configuration. You can use the
- // returned [long-running operation][google.longrunning.Operation] to track
- // the progress of moving the instance.
- //
- // `MoveInstance` returns `FAILED_PRECONDITION` if the instance meets any of
- // the following criteria:
- //
- // - Is undergoing a move to a different instance configuration
- // - Has backups
- // - Has an ongoing update
- // - Contains any CMEK-enabled databases
- // - Is a free trial instance
- //
- // While the operation is pending:
- //
- // - All other attempts to modify the instance, including changes to its
- // compute capacity, are rejected.
- //
- // - The following database and backup admin operations are rejected:
- //
- // - `DatabaseAdmin.CreateDatabase`
- //
- // - `DatabaseAdmin.UpdateDatabaseDdl` (disabled if default_leader is
- // specified in the request.)
- //
- // - `DatabaseAdmin.RestoreDatabase`
- //
- // - `DatabaseAdmin.CreateBackup`
- //
- // - `DatabaseAdmin.CopyBackup`
- //
- // - Both the source and target instance configurations are subject to
- // hourly compute and storage charges.
- //
- // - The instance might experience higher read-write latencies and a higher
- // transaction abort rate. However, moving an instance doesn't cause any
- // downtime.
- //
- // The returned [long-running operation][google.longrunning.Operation] has
- // a name of the format
- // `<instance_name>/operations/<operation_id>` and can be used to track
- // the move instance operation. The
- // [metadata][google.longrunning.Operation.metadata] field type is
- // [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata].
- // The [response][google.longrunning.Operation.response] field type is
- // [Instance][google.spanner.admin.instance.v1.Instance],
- // if successful.
- // Cancelling the operation sets its metadata's
- // [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time].
- // Cancellation is not immediate because it involves moving any data
- // previously moved to the target instance configuration back to the original
- // instance configuration. You can use this operation to track the progress of
- // the cancellation. Upon successful completion of the cancellation, the
- // operation terminates with `CANCELLED` status.
- //
- // If not cancelled, upon completion of the returned operation:
- //
- // - The instance successfully moves to the target instance
- // configuration.
- // - You are billed for compute and storage in target instance
- // configuration.
- //
- // Authorization requires the `spanner.instances.update` permission on
- // the resource [instance][google.spanner.admin.instance.v1.Instance].
- //
- // For more details, see
- // [Move an instance](https://cloud.google.com/spanner/docs/move-instance).
- MoveInstance(context.Context, *MoveInstanceRequest) (*longrunningpb.Operation, error)
-}
-
-// UnimplementedInstanceAdminServer can be embedded to have forward compatible implementations.
-type UnimplementedInstanceAdminServer struct {
-}
-
-func (*UnimplementedInstanceAdminServer) ListInstanceConfigs(context.Context, *ListInstanceConfigsRequest) (*ListInstanceConfigsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListInstanceConfigs not implemented")
-}
-func (*UnimplementedInstanceAdminServer) GetInstanceConfig(context.Context, *GetInstanceConfigRequest) (*InstanceConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetInstanceConfig not implemented")
-}
-func (*UnimplementedInstanceAdminServer) CreateInstanceConfig(context.Context, *CreateInstanceConfigRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateInstanceConfig not implemented")
-}
-func (*UnimplementedInstanceAdminServer) UpdateInstanceConfig(context.Context, *UpdateInstanceConfigRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateInstanceConfig not implemented")
-}
-func (*UnimplementedInstanceAdminServer) DeleteInstanceConfig(context.Context, *DeleteInstanceConfigRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteInstanceConfig not implemented")
-}
-func (*UnimplementedInstanceAdminServer) ListInstanceConfigOperations(context.Context, *ListInstanceConfigOperationsRequest) (*ListInstanceConfigOperationsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListInstanceConfigOperations not implemented")
-}
-func (*UnimplementedInstanceAdminServer) ListInstances(context.Context, *ListInstancesRequest) (*ListInstancesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListInstances not implemented")
-}
-func (*UnimplementedInstanceAdminServer) ListInstancePartitions(context.Context, *ListInstancePartitionsRequest) (*ListInstancePartitionsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListInstancePartitions not implemented")
-}
-func (*UnimplementedInstanceAdminServer) GetInstance(context.Context, *GetInstanceRequest) (*Instance, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetInstance not implemented")
-}
-func (*UnimplementedInstanceAdminServer) CreateInstance(context.Context, *CreateInstanceRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateInstance not implemented")
-}
-func (*UnimplementedInstanceAdminServer) UpdateInstance(context.Context, *UpdateInstanceRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateInstance not implemented")
-}
-func (*UnimplementedInstanceAdminServer) DeleteInstance(context.Context, *DeleteInstanceRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteInstance not implemented")
-}
-func (*UnimplementedInstanceAdminServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
-}
-func (*UnimplementedInstanceAdminServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
-}
-func (*UnimplementedInstanceAdminServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
-}
-func (*UnimplementedInstanceAdminServer) GetInstancePartition(context.Context, *GetInstancePartitionRequest) (*InstancePartition, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetInstancePartition not implemented")
-}
-func (*UnimplementedInstanceAdminServer) CreateInstancePartition(context.Context, *CreateInstancePartitionRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateInstancePartition not implemented")
-}
-func (*UnimplementedInstanceAdminServer) DeleteInstancePartition(context.Context, *DeleteInstancePartitionRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteInstancePartition not implemented")
-}
-func (*UnimplementedInstanceAdminServer) UpdateInstancePartition(context.Context, *UpdateInstancePartitionRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateInstancePartition not implemented")
-}
-func (*UnimplementedInstanceAdminServer) ListInstancePartitionOperations(context.Context, *ListInstancePartitionOperationsRequest) (*ListInstancePartitionOperationsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListInstancePartitionOperations not implemented")
-}
-func (*UnimplementedInstanceAdminServer) MoveInstance(context.Context, *MoveInstanceRequest) (*longrunningpb.Operation, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MoveInstance not implemented")
-}
-
-func RegisterInstanceAdminServer(s *grpc.Server, srv InstanceAdminServer) {
- s.RegisterService(&_InstanceAdmin_serviceDesc, srv)
-}
-
-func _InstanceAdmin_ListInstanceConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListInstanceConfigsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).ListInstanceConfigs(ctx, req.(*ListInstanceConfigsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_GetInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetInstanceConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).GetInstanceConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).GetInstanceConfig(ctx, req.(*GetInstanceConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_CreateInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateInstanceConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).CreateInstanceConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).CreateInstanceConfig(ctx, req.(*CreateInstanceConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_UpdateInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateInstanceConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).UpdateInstanceConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).UpdateInstanceConfig(ctx, req.(*UpdateInstanceConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_DeleteInstanceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteInstanceConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).DeleteInstanceConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).DeleteInstanceConfig(ctx, req.(*DeleteInstanceConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_ListInstanceConfigOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListInstanceConfigOperationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).ListInstanceConfigOperations(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).ListInstanceConfigOperations(ctx, req.(*ListInstanceConfigOperationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_ListInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListInstancesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).ListInstances(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).ListInstances(ctx, req.(*ListInstancesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_ListInstancePartitions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListInstancePartitionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).ListInstancePartitions(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).ListInstancePartitions(ctx, req.(*ListInstancePartitionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_GetInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetInstanceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).GetInstance(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).GetInstance(ctx, req.(*GetInstanceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_CreateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateInstanceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).CreateInstance(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).CreateInstance(ctx, req.(*CreateInstanceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_UpdateInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateInstanceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).UpdateInstance(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).UpdateInstance(ctx, req.(*UpdateInstanceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_DeleteInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteInstanceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).DeleteInstance(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).DeleteInstance(ctx, req.(*DeleteInstanceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(iampb.SetIamPolicyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).SetIamPolicy(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).SetIamPolicy(ctx, req.(*iampb.SetIamPolicyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(iampb.GetIamPolicyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).GetIamPolicy(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).GetIamPolicy(ctx, req.(*iampb.GetIamPolicyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(iampb.TestIamPermissionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).TestIamPermissions(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).TestIamPermissions(ctx, req.(*iampb.TestIamPermissionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_GetInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetInstancePartitionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).GetInstancePartition(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).GetInstancePartition(ctx, req.(*GetInstancePartitionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_CreateInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateInstancePartitionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).CreateInstancePartition(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).CreateInstancePartition(ctx, req.(*CreateInstancePartitionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_DeleteInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteInstancePartitionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).DeleteInstancePartition(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).DeleteInstancePartition(ctx, req.(*DeleteInstancePartitionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_UpdateInstancePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateInstancePartitionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).UpdateInstancePartition(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).UpdateInstancePartition(ctx, req.(*UpdateInstancePartitionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_ListInstancePartitionOperations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListInstancePartitionOperationsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).ListInstancePartitionOperations(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).ListInstancePartitionOperations(ctx, req.(*ListInstancePartitionOperationsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _InstanceAdmin_MoveInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MoveInstanceRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InstanceAdminServer).MoveInstance(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InstanceAdminServer).MoveInstance(ctx, req.(*MoveInstanceRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _InstanceAdmin_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.spanner.admin.instance.v1.InstanceAdmin",
- HandlerType: (*InstanceAdminServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "ListInstanceConfigs",
- Handler: _InstanceAdmin_ListInstanceConfigs_Handler,
- },
- {
- MethodName: "GetInstanceConfig",
- Handler: _InstanceAdmin_GetInstanceConfig_Handler,
- },
- {
- MethodName: "CreateInstanceConfig",
- Handler: _InstanceAdmin_CreateInstanceConfig_Handler,
- },
- {
- MethodName: "UpdateInstanceConfig",
- Handler: _InstanceAdmin_UpdateInstanceConfig_Handler,
- },
- {
- MethodName: "DeleteInstanceConfig",
- Handler: _InstanceAdmin_DeleteInstanceConfig_Handler,
- },
- {
- MethodName: "ListInstanceConfigOperations",
- Handler: _InstanceAdmin_ListInstanceConfigOperations_Handler,
- },
- {
- MethodName: "ListInstances",
- Handler: _InstanceAdmin_ListInstances_Handler,
- },
- {
- MethodName: "ListInstancePartitions",
- Handler: _InstanceAdmin_ListInstancePartitions_Handler,
- },
- {
- MethodName: "GetInstance",
- Handler: _InstanceAdmin_GetInstance_Handler,
- },
- {
- MethodName: "CreateInstance",
- Handler: _InstanceAdmin_CreateInstance_Handler,
- },
- {
- MethodName: "UpdateInstance",
- Handler: _InstanceAdmin_UpdateInstance_Handler,
- },
- {
- MethodName: "DeleteInstance",
- Handler: _InstanceAdmin_DeleteInstance_Handler,
- },
- {
- MethodName: "SetIamPolicy",
- Handler: _InstanceAdmin_SetIamPolicy_Handler,
- },
- {
- MethodName: "GetIamPolicy",
- Handler: _InstanceAdmin_GetIamPolicy_Handler,
- },
- {
- MethodName: "TestIamPermissions",
- Handler: _InstanceAdmin_TestIamPermissions_Handler,
- },
- {
- MethodName: "GetInstancePartition",
- Handler: _InstanceAdmin_GetInstancePartition_Handler,
- },
- {
- MethodName: "CreateInstancePartition",
- Handler: _InstanceAdmin_CreateInstancePartition_Handler,
- },
- {
- MethodName: "DeleteInstancePartition",
- Handler: _InstanceAdmin_DeleteInstancePartition_Handler,
- },
- {
- MethodName: "UpdateInstancePartition",
- Handler: _InstanceAdmin_UpdateInstancePartition_Handler,
- },
- {
- MethodName: "ListInstancePartitionOperations",
- Handler: _InstanceAdmin_ListInstancePartitionOperations_Handler,
- },
- {
- MethodName: "MoveInstance",
- Handler: _InstanceAdmin_MoveInstance_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "google/spanner/admin/instance/v1/spanner_instance_admin.proto",
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go
deleted file mode 100644
index 92c5c053c..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/path_funcs.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package instance
-
-// InstanceAdminProjectPath returns the path for the project resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s", project)
-//
-// instead.
-func InstanceAdminProjectPath(project string) string {
- return "" +
- "projects/" +
- project +
- ""
-}
-
-// InstanceAdminInstanceConfigPath returns the path for the instance config resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s/instanceConfigs/%s", project, instanceConfig)
-//
-// instead.
-func InstanceAdminInstanceConfigPath(project, instanceConfig string) string {
- return "" +
- "projects/" +
- project +
- "/instanceConfigs/" +
- instanceConfig +
- ""
-}
-
-// InstanceAdminInstancePath returns the path for the instance resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s/instances/%s", project, instance)
-//
-// instead.
-func InstanceAdminInstancePath(project, instance string) string {
- return "" +
- "projects/" +
- project +
- "/instances/" +
- instance +
- ""
-}
diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go
deleted file mode 100644
index 0eaf4377d..000000000
--- a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/version.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by gapicgen. DO NOT EDIT.
-
-package instance
-
-import "cloud.google.com/go/spanner/internal"
-
-func init() {
- versionClient = internal.Version
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/auxiliary.go b/vendor/cloud.google.com/go/spanner/apiv1/auxiliary.go
deleted file mode 100644
index 622ef2730..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/auxiliary.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package spanner
-
-import (
- spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/api/iterator"
-)
-
-// SessionIterator manages a stream of *spannerpb.Session.
-type SessionIterator struct {
- items []*spannerpb.Session
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*spannerpb.Session, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *SessionIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *SessionIterator) Next() (*spannerpb.Session, error) {
- var item *spannerpb.Session
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *SessionIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *SessionIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/apiv1/doc.go
deleted file mode 100644
index 310fcebb6..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/doc.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-// Package spanner is an auto-generated package for the
-// Cloud Spanner API.
-//
-// Cloud Spanner is a managed, mission-critical, globally consistent and
-// scalable relational database service.
-//
-// # General documentation
-//
-// For information that is relevant for all client libraries please reference
-// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
-// page includes:
-//
-// - [Authentication and Authorization]
-// - [Timeouts and Cancellation]
-// - [Testing against Client Libraries]
-// - [Debugging Client Libraries]
-// - [Inspecting errors]
-//
-// # Example usage
-//
-// To get started with this package, create a client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := spanner.NewClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// The client will use your default application credentials. Clients should be reused instead of created as needed.
-// The methods of Client are safe for concurrent use by multiple goroutines.
-// The returned client must be Closed when it is done being used.
-//
-// # Using the Client
-//
-// The following is an example of making an API call with the newly created client.
-//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := spanner.NewClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-//
-// req := &spannerpb.BatchCreateSessionsRequest{
-// // TODO: Fill request struct fields.
-// // See https://pkg.go.dev/cloud.google.com/go/spanner/apiv1/spannerpb#BatchCreateSessionsRequest.
-// }
-// resp, err := c.BatchCreateSessions(ctx, req)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// // TODO: Use resp.
-// _ = resp
-//
-// # Use of Context
-//
-// The ctx passed to NewClient is used for authentication requests and
-// for creating the underlying connection, but is not used for subsequent calls.
-// Individual methods on the client use the ctx given to them.
-//
-// To close the open connection, use the Close() method.
-//
-// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
-// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
-// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
-// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
-// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
-package spanner // import "cloud.google.com/go/spanner/apiv1"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/spanner.data",
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/info.go b/vendor/cloud.google.com/go/spanner/apiv1/info.go
deleted file mode 100644
index 4b7b79669..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/info.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanner
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Also passes any
-// provided key-value pairs. Intended for use by Google-written clients.
-//
-// Internal use only.
-func (c *Client) SetGoogleClientInfo(keyval ...string) {
- c.setGoogleClientInfo(keyval...)
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go b/vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go
deleted file mode 100644
index db29b0ba5..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/path_funcs.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanner
-
-// DatabasePath returns the path for the database resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, instance, database)
-//
-// instead.
-func DatabasePath(project, instance, database string) string {
- return "" +
- "projects/" +
- project +
- "/instances/" +
- instance +
- "/databases/" +
- database +
- ""
-}
-
-// SessionPath returns the path for the session resource.
-//
-// Deprecated: Use
-//
-// fmt.Sprintf("projects/%s/instances/%s/databases/%s/sessions/%s", project, instance, database, session)
-//
-// instead.
-func SessionPath(project, instance, database, session string) string {
- return "" +
- "projects/" +
- project +
- "/instances/" +
- instance +
- "/databases/" +
- database +
- "/sessions/" +
- session +
- ""
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go
deleted file mode 100644
index ccc6aa769..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client.go
+++ /dev/null
@@ -1,2445 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
-
-package spanner
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "time"
-
- spannerpb "cloud.google.com/go/spanner/apiv1/spannerpb"
- gax "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/googleapi"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- httptransport "google.golang.org/api/transport/http"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-var newClientHook clientHook
-
-// CallOptions contains the retry settings for each method of Client.
-type CallOptions struct {
- CreateSession []gax.CallOption
- BatchCreateSessions []gax.CallOption
- GetSession []gax.CallOption
- ListSessions []gax.CallOption
- DeleteSession []gax.CallOption
- ExecuteSql []gax.CallOption
- ExecuteStreamingSql []gax.CallOption
- ExecuteBatchDml []gax.CallOption
- Read []gax.CallOption
- StreamingRead []gax.CallOption
- BeginTransaction []gax.CallOption
- Commit []gax.CallOption
- Rollback []gax.CallOption
- PartitionQuery []gax.CallOption
- PartitionRead []gax.CallOption
- BatchWrite []gax.CallOption
-}
-
-func defaultGRPCClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("spanner.googleapis.com:443"),
- internaloption.WithDefaultEndpointTemplate("spanner.UNIVERSE_DOMAIN:443"),
- internaloption.WithDefaultMTLSEndpoint("spanner.mtls.googleapis.com:443"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- internaloption.EnableJwtWithScope(),
- option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.MaxCallRecvMsgSize(math.MaxInt32))),
- }
-}
-
-func defaultCallOptions() *CallOptions {
- return &CallOptions{
- CreateSession: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- BatchCreateSessions: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- GetSession: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ListSessions: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- DeleteSession: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ExecuteSql: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- ExecuteStreamingSql: []gax.CallOption{},
- ExecuteBatchDml: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- Read: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- StreamingRead: []gax.CallOption{},
- BeginTransaction: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- Commit: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- Rollback: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- PartitionQuery: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- PartitionRead: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.Unavailable,
- codes.ResourceExhausted,
- }, gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- })
- }),
- },
- BatchWrite: []gax.CallOption{},
- }
-}
-
-func defaultRESTCallOptions() *CallOptions {
- return &CallOptions{
- CreateSession: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- BatchCreateSessions: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- GetSession: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- ListSessions: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- DeleteSession: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- ExecuteSql: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- ExecuteStreamingSql: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- ExecuteBatchDml: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- Read: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- StreamingRead: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- BeginTransaction: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- Commit: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- Rollback: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- PartitionQuery: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- PartitionRead: []gax.CallOption{
- gax.WithTimeout(30000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnHTTPCodes(gax.Backoff{
- Initial: 250 * time.Millisecond,
- Max: 32000 * time.Millisecond,
- Multiplier: 1.30,
- },
- http.StatusServiceUnavailable,
- http.StatusTooManyRequests)
- }),
- },
- BatchWrite: []gax.CallOption{
- gax.WithTimeout(3600000 * time.Millisecond),
- },
- }
-}
-
-// internalClient is an interface that defines the methods available from Cloud Spanner API.
-type internalClient interface {
- Close() error
- setGoogleClientInfo(...string)
- Connection() *grpc.ClientConn
- CreateSession(context.Context, *spannerpb.CreateSessionRequest, ...gax.CallOption) (*spannerpb.Session, error)
- BatchCreateSessions(context.Context, *spannerpb.BatchCreateSessionsRequest, ...gax.CallOption) (*spannerpb.BatchCreateSessionsResponse, error)
- GetSession(context.Context, *spannerpb.GetSessionRequest, ...gax.CallOption) (*spannerpb.Session, error)
- ListSessions(context.Context, *spannerpb.ListSessionsRequest, ...gax.CallOption) *SessionIterator
- DeleteSession(context.Context, *spannerpb.DeleteSessionRequest, ...gax.CallOption) error
- ExecuteSql(context.Context, *spannerpb.ExecuteSqlRequest, ...gax.CallOption) (*spannerpb.ResultSet, error)
- ExecuteStreamingSql(context.Context, *spannerpb.ExecuteSqlRequest, ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error)
- ExecuteBatchDml(context.Context, *spannerpb.ExecuteBatchDmlRequest, ...gax.CallOption) (*spannerpb.ExecuteBatchDmlResponse, error)
- Read(context.Context, *spannerpb.ReadRequest, ...gax.CallOption) (*spannerpb.ResultSet, error)
- StreamingRead(context.Context, *spannerpb.ReadRequest, ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error)
- BeginTransaction(context.Context, *spannerpb.BeginTransactionRequest, ...gax.CallOption) (*spannerpb.Transaction, error)
- Commit(context.Context, *spannerpb.CommitRequest, ...gax.CallOption) (*spannerpb.CommitResponse, error)
- Rollback(context.Context, *spannerpb.RollbackRequest, ...gax.CallOption) error
- PartitionQuery(context.Context, *spannerpb.PartitionQueryRequest, ...gax.CallOption) (*spannerpb.PartitionResponse, error)
- PartitionRead(context.Context, *spannerpb.PartitionReadRequest, ...gax.CallOption) (*spannerpb.PartitionResponse, error)
- BatchWrite(context.Context, *spannerpb.BatchWriteRequest, ...gax.CallOption) (spannerpb.Spanner_BatchWriteClient, error)
-}
-
-// Client is a client for interacting with Cloud Spanner API.
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-//
-// # Cloud Spanner API
-//
-// The Cloud Spanner API can be used to manage sessions and execute
-// transactions on data stored in Cloud Spanner databases.
-type Client struct {
- // The internal transport-dependent client.
- internalClient internalClient
-
- // The call options for this service.
- CallOptions *CallOptions
-}
-
-// Wrapper methods routed to the internal client.
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Client) Close() error {
- return c.internalClient.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Client) setGoogleClientInfo(keyval ...string) {
- c.internalClient.setGoogleClientInfo(keyval...)
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *Client) Connection() *grpc.ClientConn {
- return c.internalClient.Connection()
-}
-
-// CreateSession creates a new session. A session can be used to perform
-// transactions that read and/or modify data in a Cloud Spanner database.
-// Sessions are meant to be reused for many consecutive
-// transactions.
-//
-// Sessions can only execute one transaction at a time. To execute
-// multiple concurrent read-write/write-only transactions, create
-// multiple sessions. Note that standalone reads and queries use a
-// transaction internally, and count toward the one transaction
-// limit.
-//
-// Active sessions use additional server resources, so it is a good idea to
-// delete idle and unneeded sessions.
-// Aside from explicit deletes, Cloud Spanner may delete sessions for which no
-// operations are sent for more than an hour. If a session is deleted,
-// requests to it return NOT_FOUND.
-//
-// Idle sessions can be kept alive by sending a trivial SQL query
-// periodically, e.g., "SELECT 1".
-func (c *Client) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
- return c.internalClient.CreateSession(ctx, req, opts...)
-}
-
-// BatchCreateSessions creates multiple new sessions.
-//
-// This API can be used to initialize a session cache on the clients.
-// See https://goo.gl/TgSFN2 (at https://goo.gl/TgSFN2) for best practices on session cache management.
-func (c *Client) BatchCreateSessions(ctx context.Context, req *spannerpb.BatchCreateSessionsRequest, opts ...gax.CallOption) (*spannerpb.BatchCreateSessionsResponse, error) {
- return c.internalClient.BatchCreateSessions(ctx, req, opts...)
-}
-
-// GetSession gets a session. Returns NOT_FOUND if the session does not exist.
-// This is mainly useful for determining whether a session is still
-// alive.
-func (c *Client) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
- return c.internalClient.GetSession(ctx, req, opts...)
-}
-
-// ListSessions lists all sessions in a given database.
-func (c *Client) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest, opts ...gax.CallOption) *SessionIterator {
- return c.internalClient.ListSessions(ctx, req, opts...)
-}
-
-// DeleteSession ends a session, releasing server resources associated with it. This will
-// asynchronously trigger cancellation of any operations that are running with
-// this session.
-func (c *Client) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteSession(ctx, req, opts...)
-}
-
-// ExecuteSql executes an SQL statement, returning all results in a single reply. This
-// method cannot be used to return a result set larger than 10 MiB;
-// if the query yields more data than that, the query fails with
-// a FAILED_PRECONDITION error.
-//
-// Operations inside read-write transactions might return ABORTED. If
-// this occurs, the application should restart the transaction from
-// the beginning. See Transaction for more
-// details.
-//
-// Larger result sets can be fetched in streaming fashion by calling
-// ExecuteStreamingSql
-// instead.
-func (c *Client) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
- return c.internalClient.ExecuteSql(ctx, req, opts...)
-}
-
-// ExecuteStreamingSql like ExecuteSql, except returns the
-// result set as a stream. Unlike
-// ExecuteSql, there is no limit on
-// the size of the returned result set. However, no individual row in the
-// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
-func (c *Client) ExecuteStreamingSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) {
- return c.internalClient.ExecuteStreamingSql(ctx, req, opts...)
-}
-
-// ExecuteBatchDml executes a batch of SQL DML statements. This method allows many statements
-// to be run with lower latency than submitting them sequentially with
-// ExecuteSql.
-//
-// Statements are executed in sequential order. A request can succeed even if
-// a statement fails. The
-// ExecuteBatchDmlResponse.status
-// field in the response provides information about the statement that failed.
-// Clients must inspect this field to determine whether an error occurred.
-//
-// Execution stops after the first failed statement; the remaining statements
-// are not executed.
-func (c *Client) ExecuteBatchDml(ctx context.Context, req *spannerpb.ExecuteBatchDmlRequest, opts ...gax.CallOption) (*spannerpb.ExecuteBatchDmlResponse, error) {
- return c.internalClient.ExecuteBatchDml(ctx, req, opts...)
-}
-
-// Read reads rows from the database using key lookups and scans, as a
-// simple key/value style alternative to
-// ExecuteSql. This method cannot be
-// used to return a result set larger than 10 MiB; if the read matches more
-// data than that, the read fails with a FAILED_PRECONDITION
-// error.
-//
-// Reads inside read-write transactions might return ABORTED. If
-// this occurs, the application should restart the transaction from
-// the beginning. See Transaction for more
-// details.
-//
-// Larger result sets can be yielded in streaming fashion by calling
-// StreamingRead instead.
-func (c *Client) Read(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
- return c.internalClient.Read(ctx, req, opts...)
-}
-
-// StreamingRead like Read, except returns the result set
-// as a stream. Unlike Read, there is no
-// limit on the size of the returned result set. However, no individual row in
-// the result set can exceed 100 MiB, and no column value can exceed
-// 10 MiB.
-func (c *Client) StreamingRead(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error) {
- return c.internalClient.StreamingRead(ctx, req, opts...)
-}
-
-// BeginTransaction begins a new transaction. This step can often be skipped:
-// Read,
-// ExecuteSql and
-// Commit can begin a new transaction as a
-// side-effect.
-func (c *Client) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest, opts ...gax.CallOption) (*spannerpb.Transaction, error) {
- return c.internalClient.BeginTransaction(ctx, req, opts...)
-}
-
-// Commit commits a transaction. The request includes the mutations to be
-// applied to rows in the database.
-//
-// Commit might return an ABORTED error. This can occur at any time;
-// commonly, the cause is conflicts with concurrent
-// transactions. However, it can also happen for a variety of other
-// reasons. If Commit returns ABORTED, the caller should re-attempt
-// the transaction from the beginning, re-using the same session.
-//
-// On very rare occasions, Commit might return UNKNOWN. This can happen,
-// for example, if the client job experiences a 1+ hour networking failure.
-// At that point, Cloud Spanner has lost track of the transaction outcome and
-// we recommend that you perform another read from the database to see the
-// state of things as they are now.
-func (c *Client) Commit(ctx context.Context, req *spannerpb.CommitRequest, opts ...gax.CallOption) (*spannerpb.CommitResponse, error) {
- return c.internalClient.Commit(ctx, req, opts...)
-}
-
-// Rollback rolls back a transaction, releasing any locks it holds. It is a good
-// idea to call this for any transaction that includes one or more
-// Read or
-// ExecuteSql requests and ultimately
-// decides not to commit.
-//
-// Rollback returns OK if it successfully aborts the transaction, the
-// transaction was already aborted, or the transaction is not
-// found. Rollback never returns ABORTED.
-func (c *Client) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, opts ...gax.CallOption) error {
- return c.internalClient.Rollback(ctx, req, opts...)
-}
-
-// PartitionQuery creates a set of partition tokens that can be used to execute a query
-// operation in parallel. Each of the returned partition tokens can be used
-// by ExecuteStreamingSql to
-// specify a subset of the query result to read. The same session and
-// read-only transaction must be used by the PartitionQueryRequest used to
-// create the partition tokens and the ExecuteSqlRequests that use the
-// partition tokens.
-//
-// Partition tokens become invalid when the session used to create them
-// is deleted, is idle for too long, begins a new transaction, or becomes too
-// old. When any of these happen, it is not possible to resume the query, and
-// the whole operation must be restarted from the beginning.
-func (c *Client) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) {
- return c.internalClient.PartitionQuery(ctx, req, opts...)
-}
-
-// PartitionRead creates a set of partition tokens that can be used to execute a read
-// operation in parallel. Each of the returned partition tokens can be used
-// by StreamingRead to specify a
-// subset of the read result to read. The same session and read-only
-// transaction must be used by the PartitionReadRequest used to create the
-// partition tokens and the ReadRequests that use the partition tokens. There
-// are no ordering guarantees on rows returned among the returned partition
-// tokens, or even within each individual StreamingRead call issued with a
-// partition_token.
-//
-// Partition tokens become invalid when the session used to create them
-// is deleted, is idle for too long, begins a new transaction, or becomes too
-// old. When any of these happen, it is not possible to resume the read, and
-// the whole operation must be restarted from the beginning.
-func (c *Client) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) {
- return c.internalClient.PartitionRead(ctx, req, opts...)
-}
-
-// BatchWrite batches the supplied mutation groups in a collection of efficient
-// transactions. All mutations in a group are committed atomically. However,
-// mutations across groups can be committed non-atomically in an unspecified
-// order and thus, they must be independent of each other. Partial failure is
-// possible, i.e., some groups may have been committed successfully, while
-// some may have failed. The results of individual batches are streamed into
-// the response as the batches are applied.
-//
-// BatchWrite requests are not replay protected, meaning that each mutation
-// group may be applied more than once. Replays of non-idempotent mutations
-// may have undesirable effects. For example, replays of an insert mutation
-// may produce an already exists error or if you use generated or commit
-// timestamp-based keys, it may result in additional rows being added to the
-// mutation’s table. We recommend structuring your mutation groups to be
-// idempotent to avoid this issue.
-func (c *Client) BatchWrite(ctx context.Context, req *spannerpb.BatchWriteRequest, opts ...gax.CallOption) (spannerpb.Spanner_BatchWriteClient, error) {
- return c.internalClient.BatchWrite(ctx, req, opts...)
-}
-
-// gRPCClient is a client for interacting with Cloud Spanner API over gRPC transport.
-//
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type gRPCClient struct {
- // Connection pool of gRPC connections to the service.
- connPool gtransport.ConnPool
-
- // Points back to the CallOptions field of the containing Client
- CallOptions **CallOptions
-
- // The gRPC API client.
- client spannerpb.SpannerClient
-
- // The x-goog-* metadata to be sent with each request.
- xGoogHeaders []string
-}
-
-// NewClient creates a new spanner client based on gRPC.
-// The returned client must be Closed when it is done being used to clean up its underlying connections.
-//
-// # Cloud Spanner API
-//
-// The Cloud Spanner API can be used to manage sessions and execute
-// transactions on data stored in Cloud Spanner databases.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
- clientOpts := defaultGRPCClientOptions()
- if newClientHook != nil {
- hookOpts, err := newClientHook(ctx, clientHookParams{})
- if err != nil {
- return nil, err
- }
- clientOpts = append(clientOpts, hookOpts...)
- }
-
- connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
- if err != nil {
- return nil, err
- }
- client := Client{CallOptions: defaultCallOptions()}
-
- c := &gRPCClient{
- connPool: connPool,
- client: spannerpb.NewSpannerClient(connPool),
- CallOptions: &client.CallOptions,
- }
- c.setGoogleClientInfo()
-
- client.internalClient = c
-
- return &client, nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: Connections are now pooled so this method does not always
-// return the same resource.
-func (c *gRPCClient) Connection() *grpc.ClientConn {
- return c.connPool.Conn()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *gRPCClient) Close() error {
- return c.connPool.Close()
-}
-
-// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
-type restClient struct {
- // The http endpoint to connect to.
- endpoint string
-
- // The http client.
- httpClient *http.Client
-
- // The x-goog-* headers to be sent with each request.
- xGoogHeaders []string
-
- // Points back to the CallOptions field of the containing Client
- CallOptions **CallOptions
-}
-
-// NewRESTClient creates a new spanner rest client.
-//
-// # Cloud Spanner API
-//
-// The Cloud Spanner API can be used to manage sessions and execute
-// transactions on data stored in Cloud Spanner databases.
-func NewRESTClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
- clientOpts := append(defaultRESTClientOptions(), opts...)
- httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...)
- if err != nil {
- return nil, err
- }
-
- callOpts := defaultRESTCallOptions()
- c := &restClient{
- endpoint: endpoint,
- httpClient: httpClient,
- CallOptions: &callOpts,
- }
- c.setGoogleClientInfo()
-
- return &Client{internalClient: c, CallOptions: callOpts}, nil
-}
-
-func defaultRESTClientOptions() []option.ClientOption {
- return []option.ClientOption{
- internaloption.WithDefaultEndpoint("https://spanner.googleapis.com"),
- internaloption.WithDefaultEndpointTemplate("https://spanner.UNIVERSE_DOMAIN"),
- internaloption.WithDefaultMTLSEndpoint("https://spanner.mtls.googleapis.com"),
- internaloption.WithDefaultUniverseDomain("googleapis.com"),
- internaloption.WithDefaultAudience("https://spanner.googleapis.com/"),
- internaloption.WithDefaultScopes(DefaultAuthScopes()...),
- }
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *restClient) setGoogleClientInfo(keyval ...string) {
- kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
- kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN")
- c.xGoogHeaders = []string{
- "x-goog-api-client", gax.XGoogHeader(kv...),
- }
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *restClient) Close() error {
- // Replace httpClient with nil to force cleanup.
- c.httpClient = nil
- return nil
-}
-
-// Connection returns a connection to the API service.
-//
-// Deprecated: This method always returns nil.
-func (c *restClient) Connection() *grpc.ClientConn {
- return nil
-}
-func (c *gRPCClient) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateSession[0:len((*c.CallOptions).CreateSession):len((*c.CallOptions).CreateSession)], opts...)
- var resp *spannerpb.Session
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateSession(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) BatchCreateSessions(ctx context.Context, req *spannerpb.BatchCreateSessionsRequest, opts ...gax.CallOption) (*spannerpb.BatchCreateSessionsResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).BatchCreateSessions[0:len((*c.CallOptions).BatchCreateSessions):len((*c.CallOptions).BatchCreateSessions)], opts...)
- var resp *spannerpb.BatchCreateSessionsResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.BatchCreateSessions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetSession[0:len((*c.CallOptions).GetSession):len((*c.CallOptions).GetSession)], opts...)
- var resp *spannerpb.Session
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetSession(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest, opts ...gax.CallOption) *SessionIterator {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListSessions[0:len((*c.CallOptions).ListSessions):len((*c.CallOptions).ListSessions)], opts...)
- it := &SessionIterator{}
- req = proto.Clone(req).(*spannerpb.ListSessionsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*spannerpb.Session, string, error) {
- resp := &spannerpb.ListSessionsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListSessions(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetSessions(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *gRPCClient) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteSession[0:len((*c.CallOptions).DeleteSession):len((*c.CallOptions).DeleteSession)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteSession(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ExecuteSql[0:len((*c.CallOptions).ExecuteSql):len((*c.CallOptions).ExecuteSql)], opts...)
- var resp *spannerpb.ResultSet
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ExecuteSql(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ExecuteStreamingSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ExecuteStreamingSql[0:len((*c.CallOptions).ExecuteStreamingSql):len((*c.CallOptions).ExecuteStreamingSql)], opts...)
- var resp spannerpb.Spanner_ExecuteStreamingSqlClient
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ExecuteStreamingSql(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ExecuteBatchDml(ctx context.Context, req *spannerpb.ExecuteBatchDmlRequest, opts ...gax.CallOption) (*spannerpb.ExecuteBatchDmlResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ExecuteBatchDml[0:len((*c.CallOptions).ExecuteBatchDml):len((*c.CallOptions).ExecuteBatchDml)], opts...)
- var resp *spannerpb.ExecuteBatchDmlResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ExecuteBatchDml(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) Read(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).Read[0:len((*c.CallOptions).Read):len((*c.CallOptions).Read)], opts...)
- var resp *spannerpb.ResultSet
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.Read(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) StreamingRead(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).StreamingRead[0:len((*c.CallOptions).StreamingRead):len((*c.CallOptions).StreamingRead)], opts...)
- var resp spannerpb.Spanner_StreamingReadClient
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.StreamingRead(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest, opts ...gax.CallOption) (*spannerpb.Transaction, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).BeginTransaction[0:len((*c.CallOptions).BeginTransaction):len((*c.CallOptions).BeginTransaction)], opts...)
- var resp *spannerpb.Transaction
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) Commit(ctx context.Context, req *spannerpb.CommitRequest, opts ...gax.CallOption) (*spannerpb.CommitResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).Commit[0:len((*c.CallOptions).Commit):len((*c.CallOptions).Commit)], opts...)
- var resp *spannerpb.CommitResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.Commit(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, opts ...gax.CallOption) error {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).Rollback[0:len((*c.CallOptions).Rollback):len((*c.CallOptions).Rollback)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.Rollback(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).PartitionQuery[0:len((*c.CallOptions).PartitionQuery):len((*c.CallOptions).PartitionQuery)], opts...)
- var resp *spannerpb.PartitionResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.PartitionQuery(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).PartitionRead[0:len((*c.CallOptions).PartitionRead):len((*c.CallOptions).PartitionRead)], opts...)
- var resp *spannerpb.PartitionResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.PartitionRead(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) BatchWrite(ctx context.Context, req *spannerpb.BatchWriteRequest, opts ...gax.CallOption) (spannerpb.Spanner_BatchWriteClient, error) {
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).BatchWrite[0:len((*c.CallOptions).BatchWrite):len((*c.CallOptions).BatchWrite)], opts...)
- var resp spannerpb.Spanner_BatchWriteClient
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.BatchWrite(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-// CreateSession creates a new session. A session can be used to perform
-// transactions that read and/or modify data in a Cloud Spanner database.
-// Sessions are meant to be reused for many consecutive
-// transactions.
-//
-// Sessions can only execute one transaction at a time. To execute
-// multiple concurrent read-write/write-only transactions, create
-// multiple sessions. Note that standalone reads and queries use a
-// transaction internally, and count toward the one transaction
-// limit.
-//
-// Active sessions use additional server resources, so it is a good idea to
-// delete idle and unneeded sessions.
-// Aside from explicit deletes, Cloud Spanner may delete sessions for which no
-// operations are sent for more than an hour. If a session is deleted,
-// requests to it return NOT_FOUND.
-//
-// Idle sessions can be kept alive by sending a trivial SQL query
-// periodically, e.g., "SELECT 1".
-func (c *restClient) CreateSession(ctx context.Context, req *spannerpb.CreateSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/sessions", req.GetDatabase())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).CreateSession[0:len((*c.CallOptions).CreateSession):len((*c.CallOptions).CreateSession)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.Session{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// BatchCreateSessions creates multiple new sessions.
-//
-// This API can be used to initialize a session cache on the clients.
-// See https://goo.gl/TgSFN2 (at https://goo.gl/TgSFN2) for best practices on session cache management.
-func (c *restClient) BatchCreateSessions(ctx context.Context, req *spannerpb.BatchCreateSessionsRequest, opts ...gax.CallOption) (*spannerpb.BatchCreateSessionsResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/sessions:batchCreate", req.GetDatabase())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "database", url.QueryEscape(req.GetDatabase()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).BatchCreateSessions[0:len((*c.CallOptions).BatchCreateSessions):len((*c.CallOptions).BatchCreateSessions)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.BatchCreateSessionsResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// GetSession gets a session. Returns NOT_FOUND if the session does not exist.
-// This is mainly useful for determining whether a session is still
-// alive.
-func (c *restClient) GetSession(ctx context.Context, req *spannerpb.GetSessionRequest, opts ...gax.CallOption) (*spannerpb.Session, error) {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).GetSession[0:len((*c.CallOptions).GetSession):len((*c.CallOptions).GetSession)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.Session{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ListSessions lists all sessions in a given database.
-func (c *restClient) ListSessions(ctx context.Context, req *spannerpb.ListSessionsRequest, opts ...gax.CallOption) *SessionIterator {
- it := &SessionIterator{}
- req = proto.Clone(req).(*spannerpb.ListSessionsRequest)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- it.InternalFetch = func(pageSize int, pageToken string) ([]*spannerpb.Session, string, error) {
- resp := &spannerpb.ListSessionsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, "", err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v/sessions", req.GetDatabase())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
- if req.GetFilter() != "" {
- params.Add("filter", fmt.Sprintf("%v", req.GetFilter()))
- }
- if req.GetPageSize() != 0 {
- params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize()))
- }
- if req.GetPageToken() != "" {
- params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken()))
- }
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := append(c.xGoogHeaders, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("GET", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, "", e
- }
- it.Response = resp
- return resp.GetSessions(), resp.GetNextPageToken(), nil
- }
-
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-// DeleteSession ends a session, releasing server resources associated with it. This will
-// asynchronously trigger cancellation of any operations that are running with
-// this session.
-func (c *restClient) DeleteSession(ctx context.Context, req *spannerpb.DeleteSessionRequest, opts ...gax.CallOption) error {
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil)
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// ExecuteSql executes an SQL statement, returning all results in a single reply. This
-// method cannot be used to return a result set larger than 10 MiB;
-// if the query yields more data than that, the query fails with
-// a FAILED_PRECONDITION error.
-//
-// Operations inside read-write transactions might return ABORTED. If
-// this occurs, the application should restart the transaction from
-// the beginning. See Transaction for more
-// details.
-//
-// Larger result sets can be fetched in streaming fashion by calling
-// ExecuteStreamingSql
-// instead.
-func (c *restClient) ExecuteSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:executeSql", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).ExecuteSql[0:len((*c.CallOptions).ExecuteSql):len((*c.CallOptions).ExecuteSql)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.ResultSet{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// ExecuteStreamingSql like ExecuteSql, except returns the
-// result set as a stream. Unlike
-// ExecuteSql, there is no limit on
-// the size of the returned result set. However, no individual row in the
-// result set can exceed 100 MiB, and no column value can exceed 10 MiB.
-func (c *restClient) ExecuteStreamingSql(ctx context.Context, req *spannerpb.ExecuteSqlRequest, opts ...gax.CallOption) (spannerpb.Spanner_ExecuteStreamingSqlClient, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:executeStreamingSql", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- var streamClient *executeStreamingSqlRESTClient
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- streamClient = &executeStreamingSqlRESTClient{
- ctx: ctx,
- md: metadata.MD(httpRsp.Header),
- stream: gax.NewProtoJSONStreamReader(httpRsp.Body, (&spannerpb.PartialResultSet{}).ProtoReflect().Type()),
- }
- return nil
- }, opts...)
-
- return streamClient, e
-}
-
-// executeStreamingSqlRESTClient is the stream client used to consume the server stream created by
-// the REST implementation of ExecuteStreamingSql.
-type executeStreamingSqlRESTClient struct {
- ctx context.Context
- md metadata.MD
- stream *gax.ProtoJSONStream
-}
-
-func (c *executeStreamingSqlRESTClient) Recv() (*spannerpb.PartialResultSet, error) {
- if err := c.ctx.Err(); err != nil {
- defer c.stream.Close()
- return nil, err
- }
- msg, err := c.stream.Recv()
- if err != nil {
- defer c.stream.Close()
- return nil, err
- }
- res := msg.(*spannerpb.PartialResultSet)
- return res, nil
-}
-
-func (c *executeStreamingSqlRESTClient) Header() (metadata.MD, error) {
- return c.md, nil
-}
-
-func (c *executeStreamingSqlRESTClient) Trailer() metadata.MD {
- return c.md
-}
-
-func (c *executeStreamingSqlRESTClient) CloseSend() error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented for a server-stream")
-}
-
-func (c *executeStreamingSqlRESTClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *executeStreamingSqlRESTClient) SendMsg(m interface{}) error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented for a server-stream")
-}
-
-func (c *executeStreamingSqlRESTClient) RecvMsg(m interface{}) error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented, use Recv")
-}
-
-// ExecuteBatchDml executes a batch of SQL DML statements. This method allows many statements
-// to be run with lower latency than submitting them sequentially with
-// ExecuteSql.
-//
-// Statements are executed in sequential order. A request can succeed even if
-// a statement fails. The
-// ExecuteBatchDmlResponse.status
-// field in the response provides information about the statement that failed.
-// Clients must inspect this field to determine whether an error occurred.
-//
-// Execution stops after the first failed statement; the remaining statements
-// are not executed.
-func (c *restClient) ExecuteBatchDml(ctx context.Context, req *spannerpb.ExecuteBatchDmlRequest, opts ...gax.CallOption) (*spannerpb.ExecuteBatchDmlResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:executeBatchDml", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).ExecuteBatchDml[0:len((*c.CallOptions).ExecuteBatchDml):len((*c.CallOptions).ExecuteBatchDml)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.ExecuteBatchDmlResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// Read reads rows from the database using key lookups and scans, as a
-// simple key/value style alternative to
-// ExecuteSql. This method cannot be
-// used to return a result set larger than 10 MiB; if the read matches more
-// data than that, the read fails with a FAILED_PRECONDITION
-// error.
-//
-// Reads inside read-write transactions might return ABORTED. If
-// this occurs, the application should restart the transaction from
-// the beginning. See Transaction for more
-// details.
-//
-// Larger result sets can be yielded in streaming fashion by calling
-// StreamingRead instead.
-func (c *restClient) Read(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (*spannerpb.ResultSet, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:read", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).Read[0:len((*c.CallOptions).Read):len((*c.CallOptions).Read)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.ResultSet{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// StreamingRead like Read, except returns the result set
-// as a stream. Unlike Read, there is no
-// limit on the size of the returned result set. However, no individual row in
-// the result set can exceed 100 MiB, and no column value can exceed
-// 10 MiB.
-func (c *restClient) StreamingRead(ctx context.Context, req *spannerpb.ReadRequest, opts ...gax.CallOption) (spannerpb.Spanner_StreamingReadClient, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:streamingRead", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- var streamClient *streamingReadRESTClient
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- streamClient = &streamingReadRESTClient{
- ctx: ctx,
- md: metadata.MD(httpRsp.Header),
- stream: gax.NewProtoJSONStreamReader(httpRsp.Body, (&spannerpb.PartialResultSet{}).ProtoReflect().Type()),
- }
- return nil
- }, opts...)
-
- return streamClient, e
-}
-
-// streamingReadRESTClient is the stream client used to consume the server stream created by
-// the REST implementation of StreamingRead.
-type streamingReadRESTClient struct {
- ctx context.Context
- md metadata.MD
- stream *gax.ProtoJSONStream
-}
-
-func (c *streamingReadRESTClient) Recv() (*spannerpb.PartialResultSet, error) {
- if err := c.ctx.Err(); err != nil {
- defer c.stream.Close()
- return nil, err
- }
- msg, err := c.stream.Recv()
- if err != nil {
- defer c.stream.Close()
- return nil, err
- }
- res := msg.(*spannerpb.PartialResultSet)
- return res, nil
-}
-
-func (c *streamingReadRESTClient) Header() (metadata.MD, error) {
- return c.md, nil
-}
-
-func (c *streamingReadRESTClient) Trailer() metadata.MD {
- return c.md
-}
-
-func (c *streamingReadRESTClient) CloseSend() error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented for a server-stream")
-}
-
-func (c *streamingReadRESTClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *streamingReadRESTClient) SendMsg(m interface{}) error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented for a server-stream")
-}
-
-func (c *streamingReadRESTClient) RecvMsg(m interface{}) error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented, use Recv")
-}
-
-// BeginTransaction begins a new transaction. This step can often be skipped:
-// Read,
-// ExecuteSql and
-// Commit can begin a new transaction as a
-// side-effect.
-func (c *restClient) BeginTransaction(ctx context.Context, req *spannerpb.BeginTransactionRequest, opts ...gax.CallOption) (*spannerpb.Transaction, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:beginTransaction", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).BeginTransaction[0:len((*c.CallOptions).BeginTransaction):len((*c.CallOptions).BeginTransaction)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.Transaction{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// Commit commits a transaction. The request includes the mutations to be
-// applied to rows in the database.
-//
-// Commit might return an ABORTED error. This can occur at any time;
-// commonly, the cause is conflicts with concurrent
-// transactions. However, it can also happen for a variety of other
-// reasons. If Commit returns ABORTED, the caller should re-attempt
-// the transaction from the beginning, re-using the same session.
-//
-// On very rare occasions, Commit might return UNKNOWN. This can happen,
-// for example, if the client job experiences a 1+ hour networking failure.
-// At that point, Cloud Spanner has lost track of the transaction outcome and
-// we recommend that you perform another read from the database to see the
-// state of things as they are now.
-func (c *restClient) Commit(ctx context.Context, req *spannerpb.CommitRequest, opts ...gax.CallOption) (*spannerpb.CommitResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:commit", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).Commit[0:len((*c.CallOptions).Commit):len((*c.CallOptions).Commit)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.CommitResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// Rollback rolls back a transaction, releasing any locks it holds. It is a good
-// idea to call this for any transaction that includes one or more
-// Read or
-// ExecuteSql requests and ultimately
-// decides not to commit.
-//
-// Rollback returns OK if it successfully aborts the transaction, the
-// transaction was already aborted, or the transaction is not
-// found. Rollback never returns ABORTED.
-func (c *restClient) Rollback(ctx context.Context, req *spannerpb.RollbackRequest, opts ...gax.CallOption) error {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:rollback", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- // Returns nil if there is no error, otherwise wraps
- // the response code and body into a non-nil error
- return googleapi.CheckResponse(httpRsp)
- }, opts...)
-}
-
-// PartitionQuery creates a set of partition tokens that can be used to execute a query
-// operation in parallel. Each of the returned partition tokens can be used
-// by ExecuteStreamingSql to
-// specify a subset of the query result to read. The same session and
-// read-only transaction must be used by the PartitionQueryRequest used to
-// create the partition tokens and the ExecuteSqlRequests that use the
-// partition tokens.
-//
-// Partition tokens become invalid when the session used to create them
-// is deleted, is idle for too long, begins a new transaction, or becomes too
-// old. When any of these happen, it is not possible to resume the query, and
-// the whole operation must be restarted from the beginning.
-func (c *restClient) PartitionQuery(ctx context.Context, req *spannerpb.PartitionQueryRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:partitionQuery", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).PartitionQuery[0:len((*c.CallOptions).PartitionQuery):len((*c.CallOptions).PartitionQuery)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.PartitionResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// PartitionRead creates a set of partition tokens that can be used to execute a read
-// operation in parallel. Each of the returned partition tokens can be used
-// by StreamingRead to specify a
-// subset of the read result to read. The same session and read-only
-// transaction must be used by the PartitionReadRequest used to create the
-// partition tokens and the ReadRequests that use the partition tokens. There
-// are no ordering guarantees on rows returned among the returned partition
-// tokens, or even within each individual StreamingRead call issued with a
-// partition_token.
-//
-// Partition tokens become invalid when the session used to create them
-// is deleted, is idle for too long, begins a new transaction, or becomes too
-// old. When any of these happen, it is not possible to resume the read, and
-// the whole operation must be restarted from the beginning.
-func (c *restClient) PartitionRead(ctx context.Context, req *spannerpb.PartitionReadRequest, opts ...gax.CallOption) (*spannerpb.PartitionResponse, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:partitionRead", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- opts = append((*c.CallOptions).PartitionRead[0:len((*c.CallOptions).PartitionRead):len((*c.CallOptions).PartitionRead)], opts...)
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- resp := &spannerpb.PartitionResponse{}
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
- defer httpRsp.Body.Close()
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- buf, err := io.ReadAll(httpRsp.Body)
- if err != nil {
- return err
- }
-
- if err := unm.Unmarshal(buf, resp); err != nil {
- return err
- }
-
- return nil
- }, opts...)
- if e != nil {
- return nil, e
- }
- return resp, nil
-}
-
-// BatchWrite batches the supplied mutation groups in a collection of efficient
-// transactions. All mutations in a group are committed atomically. However,
-// mutations across groups can be committed non-atomically in an unspecified
-// order and thus, they must be independent of each other. Partial failure is
-// possible, i.e., some groups may have been committed successfully, while
-// some may have failed. The results of individual batches are streamed into
-// the response as the batches are applied.
-//
-// BatchWrite requests are not replay protected, meaning that each mutation
-// group may be applied more than once. Replays of non-idempotent mutations
-// may have undesirable effects. For example, replays of an insert mutation
-// may produce an already exists error or if you use generated or commit
-// timestamp-based keys, it may result in additional rows being added to the
-// mutation’s table. We recommend structuring your mutation groups to be
-// idempotent to avoid this issue.
-func (c *restClient) BatchWrite(ctx context.Context, req *spannerpb.BatchWriteRequest, opts ...gax.CallOption) (spannerpb.Spanner_BatchWriteClient, error) {
- m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true}
- jsonReq, err := m.Marshal(req)
- if err != nil {
- return nil, err
- }
-
- baseUrl, err := url.Parse(c.endpoint)
- if err != nil {
- return nil, err
- }
- baseUrl.Path += fmt.Sprintf("/v1/%v:batchWrite", req.GetSession())
-
- params := url.Values{}
- params.Add("$alt", "json;enum-encoding=int")
-
- baseUrl.RawQuery = params.Encode()
-
- // Build HTTP headers from client and context metadata.
- hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "session", url.QueryEscape(req.GetSession()))}
-
- hds = append(c.xGoogHeaders, hds...)
- hds = append(hds, "Content-Type", "application/json")
- headers := gax.BuildHeaders(ctx, hds...)
- var streamClient *batchWriteRESTClient
- e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- if settings.Path != "" {
- baseUrl.Path = settings.Path
- }
- httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq))
- if err != nil {
- return err
- }
- httpReq = httpReq.WithContext(ctx)
- httpReq.Header = headers
-
- httpRsp, err := c.httpClient.Do(httpReq)
- if err != nil {
- return err
- }
-
- if err = googleapi.CheckResponse(httpRsp); err != nil {
- return err
- }
-
- streamClient = &batchWriteRESTClient{
- ctx: ctx,
- md: metadata.MD(httpRsp.Header),
- stream: gax.NewProtoJSONStreamReader(httpRsp.Body, (&spannerpb.BatchWriteResponse{}).ProtoReflect().Type()),
- }
- return nil
- }, opts...)
-
- return streamClient, e
-}
-
-// batchWriteRESTClient is the stream client used to consume the server stream created by
-// the REST implementation of BatchWrite.
-type batchWriteRESTClient struct {
- ctx context.Context
- md metadata.MD
- stream *gax.ProtoJSONStream
-}
-
-func (c *batchWriteRESTClient) Recv() (*spannerpb.BatchWriteResponse, error) {
- if err := c.ctx.Err(); err != nil {
- defer c.stream.Close()
- return nil, err
- }
- msg, err := c.stream.Recv()
- if err != nil {
- defer c.stream.Close()
- return nil, err
- }
- res := msg.(*spannerpb.BatchWriteResponse)
- return res, nil
-}
-
-func (c *batchWriteRESTClient) Header() (metadata.MD, error) {
- return c.md, nil
-}
-
-func (c *batchWriteRESTClient) Trailer() metadata.MD {
- return c.md
-}
-
-func (c *batchWriteRESTClient) CloseSend() error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented for a server-stream")
-}
-
-func (c *batchWriteRESTClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *batchWriteRESTClient) SendMsg(m interface{}) error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented for a server-stream")
-}
-
-func (c *batchWriteRESTClient) RecvMsg(m interface{}) error {
- // This is a no-op to fulfill the interface.
- return errors.New("this method is not implemented, use Recv")
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_options.go b/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_options.go
deleted file mode 100644
index 5b924b5d1..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spanner_client_options.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanner
-
-import "google.golang.org/api/option"
-
-// Returns the default client options used by the generated Spanner client.
-//
-// This function is only intended for use by the client library, and may be
-// removed at any time without any warning.
-func DefaultClientOptions() []option.ClientOption {
- return defaultGRPCClientOptions()
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/commit_response.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/commit_response.pb.go
deleted file mode 100644
index ccfb44258..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/commit_response.pb.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/commit_response.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The response for [Commit][google.spanner.v1.Spanner.Commit].
-type CommitResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The Cloud Spanner timestamp at which the transaction committed.
- CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
- // The statistics about this Commit. Not returned by default.
- // For more information, see
- // [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
- CommitStats *CommitResponse_CommitStats `protobuf:"bytes,2,opt,name=commit_stats,json=commitStats,proto3" json:"commit_stats,omitempty"`
-}
-
-func (x *CommitResponse) Reset() {
- *x = CommitResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_commit_response_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CommitResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CommitResponse) ProtoMessage() {}
-
-func (x *CommitResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_commit_response_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CommitResponse.ProtoReflect.Descriptor instead.
-func (*CommitResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_commit_response_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *CommitResponse) GetCommitTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CommitTimestamp
- }
- return nil
-}
-
-func (x *CommitResponse) GetCommitStats() *CommitResponse_CommitStats {
- if x != nil {
- return x.CommitStats
- }
- return nil
-}
-
-// Additional statistics about a commit.
-type CommitResponse_CommitStats struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The total number of mutations for the transaction. Knowing the
- // `mutation_count` value can help you maximize the number of mutations
- // in a transaction and minimize the number of API round trips. You can
- // also monitor this value to prevent transactions from exceeding the system
- // [limit](https://cloud.google.com/spanner/quotas#limits_for_creating_reading_updating_and_deleting_data).
- // If the number of mutations exceeds the limit, the server returns
- // [INVALID_ARGUMENT](https://cloud.google.com/spanner/docs/reference/rest/v1/Code#ENUM_VALUES.INVALID_ARGUMENT).
- MutationCount int64 `protobuf:"varint,1,opt,name=mutation_count,json=mutationCount,proto3" json:"mutation_count,omitempty"`
-}
-
-func (x *CommitResponse_CommitStats) Reset() {
- *x = CommitResponse_CommitStats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_commit_response_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CommitResponse_CommitStats) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CommitResponse_CommitStats) ProtoMessage() {}
-
-func (x *CommitResponse_CommitStats) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_commit_response_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CommitResponse_CommitStats.ProtoReflect.Descriptor instead.
-func (*CommitResponse_CommitStats) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_commit_response_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *CommitResponse_CommitStats) GetMutationCount() int64 {
- if x != nil {
- return x.MutationCount
- }
- return 0
-}
-
-var File_google_spanner_v1_commit_response_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_commit_response_proto_rawDesc = []byte{
- 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdf, 0x01,
- 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x45, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x50, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
- 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f,
- 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x1a, 0x34, 0x0a, 0x0b, 0x43, 0x6f, 0x6d,
- 0x6d, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x75, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x0d, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42,
- 0xb6, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x13, 0x43, 0x6f, 0x6d, 0x6d, 0x69,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70,
- 0x69, 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56,
- 0x31, 0xca, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_commit_response_proto_rawDescOnce sync.Once
- file_google_spanner_v1_commit_response_proto_rawDescData = file_google_spanner_v1_commit_response_proto_rawDesc
-)
-
-func file_google_spanner_v1_commit_response_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_commit_response_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_commit_response_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_commit_response_proto_rawDescData)
- })
- return file_google_spanner_v1_commit_response_proto_rawDescData
-}
-
-var file_google_spanner_v1_commit_response_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_google_spanner_v1_commit_response_proto_goTypes = []any{
- (*CommitResponse)(nil), // 0: google.spanner.v1.CommitResponse
- (*CommitResponse_CommitStats)(nil), // 1: google.spanner.v1.CommitResponse.CommitStats
- (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp
-}
-var file_google_spanner_v1_commit_response_proto_depIdxs = []int32{
- 2, // 0: google.spanner.v1.CommitResponse.commit_timestamp:type_name -> google.protobuf.Timestamp
- 1, // 1: google.spanner.v1.CommitResponse.commit_stats:type_name -> google.spanner.v1.CommitResponse.CommitStats
- 2, // [2:2] is the sub-list for method output_type
- 2, // [2:2] is the sub-list for method input_type
- 2, // [2:2] is the sub-list for extension type_name
- 2, // [2:2] is the sub-list for extension extendee
- 0, // [0:2] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_commit_response_proto_init() }
-func file_google_spanner_v1_commit_response_proto_init() {
- if File_google_spanner_v1_commit_response_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_commit_response_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*CommitResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_commit_response_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*CommitResponse_CommitStats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_commit_response_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_commit_response_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_commit_response_proto_depIdxs,
- MessageInfos: file_google_spanner_v1_commit_response_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_commit_response_proto = out.File
- file_google_spanner_v1_commit_response_proto_rawDesc = nil
- file_google_spanner_v1_commit_response_proto_goTypes = nil
- file_google_spanner_v1_commit_response_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/keys.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/keys.pb.go
deleted file mode 100644
index 0cfb01a43..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/keys.pb.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/keys.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// KeyRange represents a range of rows in a table or index.
-//
-// A range has a start key and an end key. These keys can be open or
-// closed, indicating if the range includes rows with that key.
-//
-// Keys are represented by lists, where the ith value in the list
-// corresponds to the ith component of the table or index primary key.
-// Individual values are encoded as described
-// [here][google.spanner.v1.TypeCode].
-//
-// For example, consider the following table definition:
-//
-// CREATE TABLE UserEvents (
-// UserName STRING(MAX),
-// EventDate STRING(10)
-// ) PRIMARY KEY(UserName, EventDate);
-//
-// The following keys name rows in this table:
-//
-// ["Bob", "2014-09-23"]
-// ["Alfred", "2015-06-12"]
-//
-// Since the `UserEvents` table's `PRIMARY KEY` clause names two
-// columns, each `UserEvents` key has two elements; the first is the
-// `UserName`, and the second is the `EventDate`.
-//
-// Key ranges with multiple components are interpreted
-// lexicographically by component using the table or index key's declared
-// sort order. For example, the following range returns all events for
-// user `"Bob"` that occurred in the year 2015:
-//
-// "start_closed": ["Bob", "2015-01-01"]
-// "end_closed": ["Bob", "2015-12-31"]
-//
-// Start and end keys can omit trailing key components. This affects the
-// inclusion and exclusion of rows that exactly match the provided key
-// components: if the key is closed, then rows that exactly match the
-// provided components are included; if the key is open, then rows
-// that exactly match are not included.
-//
-// For example, the following range includes all events for `"Bob"` that
-// occurred during and after the year 2000:
-//
-// "start_closed": ["Bob", "2000-01-01"]
-// "end_closed": ["Bob"]
-//
-// The next example retrieves all events for `"Bob"`:
-//
-// "start_closed": ["Bob"]
-// "end_closed": ["Bob"]
-//
-// To retrieve events before the year 2000:
-//
-// "start_closed": ["Bob"]
-// "end_open": ["Bob", "2000-01-01"]
-//
-// The following range includes all rows in the table:
-//
-// "start_closed": []
-// "end_closed": []
-//
-// This range returns all users whose `UserName` begins with any
-// character from A to C:
-//
-// "start_closed": ["A"]
-// "end_open": ["D"]
-//
-// This range returns all users whose `UserName` begins with B:
-//
-// "start_closed": ["B"]
-// "end_open": ["C"]
-//
-// Key ranges honor column sort order. For example, suppose a table is
-// defined as follows:
-//
-// CREATE TABLE DescendingSortedTable {
-// Key INT64,
-// ...
-// ) PRIMARY KEY(Key DESC);
-//
-// The following range retrieves all rows with key values between 1
-// and 100 inclusive:
-//
-// "start_closed": ["100"]
-// "end_closed": ["1"]
-//
-// Note that 100 is passed as the start, and 1 is passed as the end,
-// because `Key` is a descending column in the schema.
-type KeyRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The start key must be provided. It can be either closed or open.
- //
- // Types that are assignable to StartKeyType:
- //
- // *KeyRange_StartClosed
- // *KeyRange_StartOpen
- StartKeyType isKeyRange_StartKeyType `protobuf_oneof:"start_key_type"`
- // The end key must be provided. It can be either closed or open.
- //
- // Types that are assignable to EndKeyType:
- //
- // *KeyRange_EndClosed
- // *KeyRange_EndOpen
- EndKeyType isKeyRange_EndKeyType `protobuf_oneof:"end_key_type"`
-}
-
-func (x *KeyRange) Reset() {
- *x = KeyRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_keys_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *KeyRange) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*KeyRange) ProtoMessage() {}
-
-func (x *KeyRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_keys_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use KeyRange.ProtoReflect.Descriptor instead.
-func (*KeyRange) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_keys_proto_rawDescGZIP(), []int{0}
-}
-
-func (m *KeyRange) GetStartKeyType() isKeyRange_StartKeyType {
- if m != nil {
- return m.StartKeyType
- }
- return nil
-}
-
-func (x *KeyRange) GetStartClosed() *structpb.ListValue {
- if x, ok := x.GetStartKeyType().(*KeyRange_StartClosed); ok {
- return x.StartClosed
- }
- return nil
-}
-
-func (x *KeyRange) GetStartOpen() *structpb.ListValue {
- if x, ok := x.GetStartKeyType().(*KeyRange_StartOpen); ok {
- return x.StartOpen
- }
- return nil
-}
-
-func (m *KeyRange) GetEndKeyType() isKeyRange_EndKeyType {
- if m != nil {
- return m.EndKeyType
- }
- return nil
-}
-
-func (x *KeyRange) GetEndClosed() *structpb.ListValue {
- if x, ok := x.GetEndKeyType().(*KeyRange_EndClosed); ok {
- return x.EndClosed
- }
- return nil
-}
-
-func (x *KeyRange) GetEndOpen() *structpb.ListValue {
- if x, ok := x.GetEndKeyType().(*KeyRange_EndOpen); ok {
- return x.EndOpen
- }
- return nil
-}
-
-type isKeyRange_StartKeyType interface {
- isKeyRange_StartKeyType()
-}
-
-type KeyRange_StartClosed struct {
- // If the start is closed, then the range includes all rows whose
- // first `len(start_closed)` key columns exactly match `start_closed`.
- StartClosed *structpb.ListValue `protobuf:"bytes,1,opt,name=start_closed,json=startClosed,proto3,oneof"`
-}
-
-type KeyRange_StartOpen struct {
- // If the start is open, then the range excludes rows whose first
- // `len(start_open)` key columns exactly match `start_open`.
- StartOpen *structpb.ListValue `protobuf:"bytes,2,opt,name=start_open,json=startOpen,proto3,oneof"`
-}
-
-func (*KeyRange_StartClosed) isKeyRange_StartKeyType() {}
-
-func (*KeyRange_StartOpen) isKeyRange_StartKeyType() {}
-
-type isKeyRange_EndKeyType interface {
- isKeyRange_EndKeyType()
-}
-
-type KeyRange_EndClosed struct {
- // If the end is closed, then the range includes all rows whose
- // first `len(end_closed)` key columns exactly match `end_closed`.
- EndClosed *structpb.ListValue `protobuf:"bytes,3,opt,name=end_closed,json=endClosed,proto3,oneof"`
-}
-
-type KeyRange_EndOpen struct {
- // If the end is open, then the range excludes rows whose first
- // `len(end_open)` key columns exactly match `end_open`.
- EndOpen *structpb.ListValue `protobuf:"bytes,4,opt,name=end_open,json=endOpen,proto3,oneof"`
-}
-
-func (*KeyRange_EndClosed) isKeyRange_EndKeyType() {}
-
-func (*KeyRange_EndOpen) isKeyRange_EndKeyType() {}
-
-// `KeySet` defines a collection of Cloud Spanner keys and/or key ranges. All
-// the keys are expected to be in the same table or index. The keys need
-// not be sorted in any particular way.
-//
-// If the same key is specified multiple times in the set (for example
-// if two ranges, two keys, or a key and a range overlap), Cloud Spanner
-// behaves as if the key were only specified once.
-type KeySet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A list of specific keys. Entries in `keys` should have exactly as
- // many elements as there are columns in the primary or index key
- // with which this `KeySet` is used. Individual key values are
- // encoded as described [here][google.spanner.v1.TypeCode].
- Keys []*structpb.ListValue `protobuf:"bytes,1,rep,name=keys,proto3" json:"keys,omitempty"`
- // A list of key ranges. See [KeyRange][google.spanner.v1.KeyRange] for more information about
- // key range specifications.
- Ranges []*KeyRange `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"`
- // For convenience `all` can be set to `true` to indicate that this
- // `KeySet` matches all keys in the table or index. Note that any keys
- // specified in `keys` or `ranges` are only yielded once.
- All bool `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"`
-}
-
-func (x *KeySet) Reset() {
- *x = KeySet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_keys_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *KeySet) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*KeySet) ProtoMessage() {}
-
-func (x *KeySet) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_keys_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use KeySet.ProtoReflect.Descriptor instead.
-func (*KeySet) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_keys_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *KeySet) GetKeys() []*structpb.ListValue {
- if x != nil {
- return x.Keys
- }
- return nil
-}
-
-func (x *KeySet) GetRanges() []*KeyRange {
- if x != nil {
- return x.Ranges
- }
- return nil
-}
-
-func (x *KeySet) GetAll() bool {
- if x != nil {
- return x.All
- }
- return false
-}
-
-var File_google_spanner_v1_keys_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_keys_proto_rawDesc = []byte{
- 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
- 0xa0, 0x02, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3f, 0x0a, 0x0c,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
- 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x3b, 0x0a,
- 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52,
- 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x65, 0x6e,
- 0x64, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x09, 0x65, 0x6e,
- 0x64, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x6f,
- 0x70, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x4f, 0x70, 0x65, 0x6e,
- 0x42, 0x10, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x22, 0x7f, 0x0a, 0x06, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x74, 0x12, 0x2e, 0x0a, 0x04,
- 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4c, 0x69, 0x73,
- 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x33, 0x0a, 0x06,
- 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65,
- 0x73, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03,
- 0x61, 0x6c, 0x6c, 0x42, 0xac, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x09, 0x4b,
- 0x65, 0x79, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70,
- 0x62, 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x17, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a,
- 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a,
- 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_keys_proto_rawDescOnce sync.Once
- file_google_spanner_v1_keys_proto_rawDescData = file_google_spanner_v1_keys_proto_rawDesc
-)
-
-func file_google_spanner_v1_keys_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_keys_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_keys_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_keys_proto_rawDescData)
- })
- return file_google_spanner_v1_keys_proto_rawDescData
-}
-
-var file_google_spanner_v1_keys_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_google_spanner_v1_keys_proto_goTypes = []any{
- (*KeyRange)(nil), // 0: google.spanner.v1.KeyRange
- (*KeySet)(nil), // 1: google.spanner.v1.KeySet
- (*structpb.ListValue)(nil), // 2: google.protobuf.ListValue
-}
-var file_google_spanner_v1_keys_proto_depIdxs = []int32{
- 2, // 0: google.spanner.v1.KeyRange.start_closed:type_name -> google.protobuf.ListValue
- 2, // 1: google.spanner.v1.KeyRange.start_open:type_name -> google.protobuf.ListValue
- 2, // 2: google.spanner.v1.KeyRange.end_closed:type_name -> google.protobuf.ListValue
- 2, // 3: google.spanner.v1.KeyRange.end_open:type_name -> google.protobuf.ListValue
- 2, // 4: google.spanner.v1.KeySet.keys:type_name -> google.protobuf.ListValue
- 0, // 5: google.spanner.v1.KeySet.ranges:type_name -> google.spanner.v1.KeyRange
- 6, // [6:6] is the sub-list for method output_type
- 6, // [6:6] is the sub-list for method input_type
- 6, // [6:6] is the sub-list for extension type_name
- 6, // [6:6] is the sub-list for extension extendee
- 0, // [0:6] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_keys_proto_init() }
-func file_google_spanner_v1_keys_proto_init() {
- if File_google_spanner_v1_keys_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_keys_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*KeyRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_keys_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*KeySet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_v1_keys_proto_msgTypes[0].OneofWrappers = []any{
- (*KeyRange_StartClosed)(nil),
- (*KeyRange_StartOpen)(nil),
- (*KeyRange_EndClosed)(nil),
- (*KeyRange_EndOpen)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_keys_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_keys_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_keys_proto_depIdxs,
- MessageInfos: file_google_spanner_v1_keys_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_keys_proto = out.File
- file_google_spanner_v1_keys_proto_rawDesc = nil
- file_google_spanner_v1_keys_proto_goTypes = nil
- file_google_spanner_v1_keys_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/mutation.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/mutation.pb.go
deleted file mode 100644
index bdbbac372..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/mutation.pb.go
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/mutation.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// A modification to one or more Cloud Spanner rows. Mutations can be
-// applied to a Cloud Spanner database by sending them in a
-// [Commit][google.spanner.v1.Spanner.Commit] call.
-type Mutation struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The operation to perform.
- //
- // Types that are assignable to Operation:
- //
- // *Mutation_Insert
- // *Mutation_Update
- // *Mutation_InsertOrUpdate
- // *Mutation_Replace
- // *Mutation_Delete_
- Operation isMutation_Operation `protobuf_oneof:"operation"`
-}
-
-func (x *Mutation) Reset() {
- *x = Mutation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_mutation_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Mutation) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Mutation) ProtoMessage() {}
-
-func (x *Mutation) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_mutation_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Mutation.ProtoReflect.Descriptor instead.
-func (*Mutation) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_mutation_proto_rawDescGZIP(), []int{0}
-}
-
-func (m *Mutation) GetOperation() isMutation_Operation {
- if m != nil {
- return m.Operation
- }
- return nil
-}
-
-func (x *Mutation) GetInsert() *Mutation_Write {
- if x, ok := x.GetOperation().(*Mutation_Insert); ok {
- return x.Insert
- }
- return nil
-}
-
-func (x *Mutation) GetUpdate() *Mutation_Write {
- if x, ok := x.GetOperation().(*Mutation_Update); ok {
- return x.Update
- }
- return nil
-}
-
-func (x *Mutation) GetInsertOrUpdate() *Mutation_Write {
- if x, ok := x.GetOperation().(*Mutation_InsertOrUpdate); ok {
- return x.InsertOrUpdate
- }
- return nil
-}
-
-func (x *Mutation) GetReplace() *Mutation_Write {
- if x, ok := x.GetOperation().(*Mutation_Replace); ok {
- return x.Replace
- }
- return nil
-}
-
-func (x *Mutation) GetDelete() *Mutation_Delete {
- if x, ok := x.GetOperation().(*Mutation_Delete_); ok {
- return x.Delete
- }
- return nil
-}
-
-type isMutation_Operation interface {
- isMutation_Operation()
-}
-
-type Mutation_Insert struct {
- // Insert new rows in a table. If any of the rows already exist,
- // the write or transaction fails with error `ALREADY_EXISTS`.
- Insert *Mutation_Write `protobuf:"bytes,1,opt,name=insert,proto3,oneof"`
-}
-
-type Mutation_Update struct {
- // Update existing rows in a table. If any of the rows does not
- // already exist, the transaction fails with error `NOT_FOUND`.
- Update *Mutation_Write `protobuf:"bytes,2,opt,name=update,proto3,oneof"`
-}
-
-type Mutation_InsertOrUpdate struct {
- // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, then
- // its column values are overwritten with the ones provided. Any
- // column values not explicitly written are preserved.
- //
- // When using [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as when using [insert][google.spanner.v1.Mutation.insert], all `NOT
- // NULL` columns in the table must be given a value. This holds true
- // even when the row already exists and will therefore actually be updated.
- InsertOrUpdate *Mutation_Write `protobuf:"bytes,3,opt,name=insert_or_update,json=insertOrUpdate,proto3,oneof"`
-}
-
-type Mutation_Replace struct {
- // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, it is
- // deleted, and the column values provided are inserted
- // instead. Unlike [insert_or_update][google.spanner.v1.Mutation.insert_or_update], this means any values not
- // explicitly written become `NULL`.
- //
- // In an interleaved table, if you create the child table with the
- // `ON DELETE CASCADE` annotation, then replacing a parent row
- // also deletes the child rows. Otherwise, you must delete the
- // child rows before you replace the parent row.
- Replace *Mutation_Write `protobuf:"bytes,4,opt,name=replace,proto3,oneof"`
-}
-
-type Mutation_Delete_ struct {
- // Delete rows from a table. Succeeds whether or not the named
- // rows were present.
- Delete *Mutation_Delete `protobuf:"bytes,5,opt,name=delete,proto3,oneof"`
-}
-
-func (*Mutation_Insert) isMutation_Operation() {}
-
-func (*Mutation_Update) isMutation_Operation() {}
-
-func (*Mutation_InsertOrUpdate) isMutation_Operation() {}
-
-func (*Mutation_Replace) isMutation_Operation() {}
-
-func (*Mutation_Delete_) isMutation_Operation() {}
-
-// Arguments to [insert][google.spanner.v1.Mutation.insert], [update][google.spanner.v1.Mutation.update], [insert_or_update][google.spanner.v1.Mutation.insert_or_update], and
-// [replace][google.spanner.v1.Mutation.replace] operations.
-type Mutation_Write struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The table whose rows will be written.
- Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
- // The names of the columns in [table][google.spanner.v1.Mutation.Write.table] to be written.
- //
- // The list of columns must contain enough columns to allow
- // Cloud Spanner to derive values for all primary key columns in the
- // row(s) to be modified.
- Columns []string `protobuf:"bytes,2,rep,name=columns,proto3" json:"columns,omitempty"`
- // The values to be written. `values` can contain more than one
- // list of values. If it does, then multiple rows are written, one
- // for each entry in `values`. Each list in `values` must have
- // exactly as many entries as there are entries in [columns][google.spanner.v1.Mutation.Write.columns]
- // above. Sending multiple lists is equivalent to sending multiple
- // `Mutation`s, each containing one `values` entry and repeating
- // [table][google.spanner.v1.Mutation.Write.table] and [columns][google.spanner.v1.Mutation.Write.columns]. Individual values in each list are
- // encoded as described [here][google.spanner.v1.TypeCode].
- Values []*structpb.ListValue `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
-}
-
-func (x *Mutation_Write) Reset() {
- *x = Mutation_Write{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_mutation_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Mutation_Write) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Mutation_Write) ProtoMessage() {}
-
-func (x *Mutation_Write) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_mutation_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Mutation_Write.ProtoReflect.Descriptor instead.
-func (*Mutation_Write) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_mutation_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *Mutation_Write) GetTable() string {
- if x != nil {
- return x.Table
- }
- return ""
-}
-
-func (x *Mutation_Write) GetColumns() []string {
- if x != nil {
- return x.Columns
- }
- return nil
-}
-
-func (x *Mutation_Write) GetValues() []*structpb.ListValue {
- if x != nil {
- return x.Values
- }
- return nil
-}
-
-// Arguments to [delete][google.spanner.v1.Mutation.delete] operations.
-type Mutation_Delete struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The table whose rows will be deleted.
- Table string `protobuf:"bytes,1,opt,name=table,proto3" json:"table,omitempty"`
- // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. The
- // primary keys must be specified in the order in which they appear in the
- // `PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL
- // statement used to create the table).
- // Delete is idempotent. The transaction will succeed even if some or all
- // rows do not exist.
- KeySet *KeySet `protobuf:"bytes,2,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
-}
-
-func (x *Mutation_Delete) Reset() {
- *x = Mutation_Delete{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_mutation_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Mutation_Delete) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Mutation_Delete) ProtoMessage() {}
-
-func (x *Mutation_Delete) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_mutation_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Mutation_Delete.ProtoReflect.Descriptor instead.
-func (*Mutation_Delete) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_mutation_proto_rawDescGZIP(), []int{0, 1}
-}
-
-func (x *Mutation_Delete) GetTable() string {
- if x != nil {
- return x.Table
- }
- return ""
-}
-
-func (x *Mutation_Delete) GetKeySet() *KeySet {
- if x != nil {
- return x.KeySet
- }
- return nil
-}
-
-var File_google_spanner_v1_mutation_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_mutation_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
- 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x22, 0xad, 0x04, 0x0a, 0x08, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x3b, 0x0a, 0x06, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x48, 0x00, 0x52, 0x06, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x12, 0x3b, 0x0a, 0x06,
- 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x48,
- 0x00, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x69, 0x6e, 0x73,
- 0x65, 0x72, 0x74, 0x5f, 0x6f, 0x72, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74,
- 0x4f, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c,
- 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x75,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x00, 0x52, 0x07,
- 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x75, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x00, 0x52, 0x06, 0x64,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, 0x70, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x19,
- 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c,
- 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75,
- 0x6d, 0x6e, 0x73, 0x12, 0x32, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x5c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x37, 0x0a, 0x07,
- 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6b,
- 0x65, 0x79, 0x53, 0x65, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0xb0, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x4d, 0x75,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31,
- 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x70, 0x62, 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02,
- 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x3a, 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_mutation_proto_rawDescOnce sync.Once
- file_google_spanner_v1_mutation_proto_rawDescData = file_google_spanner_v1_mutation_proto_rawDesc
-)
-
-func file_google_spanner_v1_mutation_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_mutation_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_mutation_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_mutation_proto_rawDescData)
- })
- return file_google_spanner_v1_mutation_proto_rawDescData
-}
-
-var file_google_spanner_v1_mutation_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_google_spanner_v1_mutation_proto_goTypes = []any{
- (*Mutation)(nil), // 0: google.spanner.v1.Mutation
- (*Mutation_Write)(nil), // 1: google.spanner.v1.Mutation.Write
- (*Mutation_Delete)(nil), // 2: google.spanner.v1.Mutation.Delete
- (*structpb.ListValue)(nil), // 3: google.protobuf.ListValue
- (*KeySet)(nil), // 4: google.spanner.v1.KeySet
-}
-var file_google_spanner_v1_mutation_proto_depIdxs = []int32{
- 1, // 0: google.spanner.v1.Mutation.insert:type_name -> google.spanner.v1.Mutation.Write
- 1, // 1: google.spanner.v1.Mutation.update:type_name -> google.spanner.v1.Mutation.Write
- 1, // 2: google.spanner.v1.Mutation.insert_or_update:type_name -> google.spanner.v1.Mutation.Write
- 1, // 3: google.spanner.v1.Mutation.replace:type_name -> google.spanner.v1.Mutation.Write
- 2, // 4: google.spanner.v1.Mutation.delete:type_name -> google.spanner.v1.Mutation.Delete
- 3, // 5: google.spanner.v1.Mutation.Write.values:type_name -> google.protobuf.ListValue
- 4, // 6: google.spanner.v1.Mutation.Delete.key_set:type_name -> google.spanner.v1.KeySet
- 7, // [7:7] is the sub-list for method output_type
- 7, // [7:7] is the sub-list for method input_type
- 7, // [7:7] is the sub-list for extension type_name
- 7, // [7:7] is the sub-list for extension extendee
- 0, // [0:7] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_mutation_proto_init() }
-func file_google_spanner_v1_mutation_proto_init() {
- if File_google_spanner_v1_mutation_proto != nil {
- return
- }
- file_google_spanner_v1_keys_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_mutation_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_mutation_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_Write); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_mutation_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*Mutation_Delete); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_v1_mutation_proto_msgTypes[0].OneofWrappers = []any{
- (*Mutation_Insert)(nil),
- (*Mutation_Update)(nil),
- (*Mutation_InsertOrUpdate)(nil),
- (*Mutation_Replace)(nil),
- (*Mutation_Delete_)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_mutation_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_mutation_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_mutation_proto_depIdxs,
- MessageInfos: file_google_spanner_v1_mutation_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_mutation_proto = out.File
- file_google_spanner_v1_mutation_proto_rawDesc = nil
- file_google_spanner_v1_mutation_proto_goTypes = nil
- file_google_spanner_v1_mutation_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/query_plan.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/query_plan.pb.go
deleted file mode 100644
index fe4411080..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/query_plan.pb.go
+++ /dev/null
@@ -1,595 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/query_plan.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The kind of [PlanNode][google.spanner.v1.PlanNode]. Distinguishes between the two different kinds of
-// nodes that can appear in a query plan.
-type PlanNode_Kind int32
-
-const (
- // Not specified.
- PlanNode_KIND_UNSPECIFIED PlanNode_Kind = 0
- // Denotes a Relational operator node in the expression tree. Relational
- // operators represent iterative processing of rows during query execution.
- // For example, a `TableScan` operation that reads rows from a table.
- PlanNode_RELATIONAL PlanNode_Kind = 1
- // Denotes a Scalar node in the expression tree. Scalar nodes represent
- // non-iterable entities in the query plan. For example, constants or
- // arithmetic operators appearing inside predicate expressions or references
- // to column names.
- PlanNode_SCALAR PlanNode_Kind = 2
-)
-
-// Enum value maps for PlanNode_Kind.
-var (
- PlanNode_Kind_name = map[int32]string{
- 0: "KIND_UNSPECIFIED",
- 1: "RELATIONAL",
- 2: "SCALAR",
- }
- PlanNode_Kind_value = map[string]int32{
- "KIND_UNSPECIFIED": 0,
- "RELATIONAL": 1,
- "SCALAR": 2,
- }
-)
-
-func (x PlanNode_Kind) Enum() *PlanNode_Kind {
- p := new(PlanNode_Kind)
- *p = x
- return p
-}
-
-func (x PlanNode_Kind) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (PlanNode_Kind) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_query_plan_proto_enumTypes[0].Descriptor()
-}
-
-func (PlanNode_Kind) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_query_plan_proto_enumTypes[0]
-}
-
-func (x PlanNode_Kind) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use PlanNode_Kind.Descriptor instead.
-func (PlanNode_Kind) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_query_plan_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// Node information for nodes appearing in a [QueryPlan.plan_nodes][google.spanner.v1.QueryPlan.plan_nodes].
-type PlanNode struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The `PlanNode`'s index in [node list][google.spanner.v1.QueryPlan.plan_nodes].
- Index int32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
- // Used to determine the type of node. May be needed for visualizing
- // different kinds of nodes differently. For example, If the node is a
- // [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] node, it will have a condensed representation
- // which can be used to directly embed a description of the node in its
- // parent.
- Kind PlanNode_Kind `protobuf:"varint,2,opt,name=kind,proto3,enum=google.spanner.v1.PlanNode_Kind" json:"kind,omitempty"`
- // The display name for the node.
- DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
- // List of child node `index`es and their relationship to this parent.
- ChildLinks []*PlanNode_ChildLink `protobuf:"bytes,4,rep,name=child_links,json=childLinks,proto3" json:"child_links,omitempty"`
- // Condensed representation for [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] nodes.
- ShortRepresentation *PlanNode_ShortRepresentation `protobuf:"bytes,5,opt,name=short_representation,json=shortRepresentation,proto3" json:"short_representation,omitempty"`
- // Attributes relevant to the node contained in a group of key-value pairs.
- // For example, a Parameter Reference node could have the following
- // information in its metadata:
- //
- // {
- // "parameter_reference": "param1",
- // "parameter_type": "array"
- // }
- Metadata *structpb.Struct `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // The execution statistics associated with the node, contained in a group of
- // key-value pairs. Only present if the plan was returned as a result of a
- // profile query. For example, number of executions, number of rows/time per
- // execution etc.
- ExecutionStats *structpb.Struct `protobuf:"bytes,7,opt,name=execution_stats,json=executionStats,proto3" json:"execution_stats,omitempty"`
-}
-
-func (x *PlanNode) Reset() {
- *x = PlanNode{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PlanNode) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PlanNode) ProtoMessage() {}
-
-func (x *PlanNode) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PlanNode.ProtoReflect.Descriptor instead.
-func (*PlanNode) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_query_plan_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *PlanNode) GetIndex() int32 {
- if x != nil {
- return x.Index
- }
- return 0
-}
-
-func (x *PlanNode) GetKind() PlanNode_Kind {
- if x != nil {
- return x.Kind
- }
- return PlanNode_KIND_UNSPECIFIED
-}
-
-func (x *PlanNode) GetDisplayName() string {
- if x != nil {
- return x.DisplayName
- }
- return ""
-}
-
-func (x *PlanNode) GetChildLinks() []*PlanNode_ChildLink {
- if x != nil {
- return x.ChildLinks
- }
- return nil
-}
-
-func (x *PlanNode) GetShortRepresentation() *PlanNode_ShortRepresentation {
- if x != nil {
- return x.ShortRepresentation
- }
- return nil
-}
-
-func (x *PlanNode) GetMetadata() *structpb.Struct {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-func (x *PlanNode) GetExecutionStats() *structpb.Struct {
- if x != nil {
- return x.ExecutionStats
- }
- return nil
-}
-
-// Contains an ordered list of nodes appearing in the query plan.
-type QueryPlan struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The nodes in the query plan. Plan nodes are returned in pre-order starting
- // with the plan root. Each [PlanNode][google.spanner.v1.PlanNode]'s `id` corresponds to its index in
- // `plan_nodes`.
- PlanNodes []*PlanNode `protobuf:"bytes,1,rep,name=plan_nodes,json=planNodes,proto3" json:"plan_nodes,omitempty"`
-}
-
-func (x *QueryPlan) Reset() {
- *x = QueryPlan{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryPlan) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryPlan) ProtoMessage() {}
-
-func (x *QueryPlan) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryPlan.ProtoReflect.Descriptor instead.
-func (*QueryPlan) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_query_plan_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *QueryPlan) GetPlanNodes() []*PlanNode {
- if x != nil {
- return x.PlanNodes
- }
- return nil
-}
-
-// Metadata associated with a parent-child relationship appearing in a
-// [PlanNode][google.spanner.v1.PlanNode].
-type PlanNode_ChildLink struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The node to which the link points.
- ChildIndex int32 `protobuf:"varint,1,opt,name=child_index,json=childIndex,proto3" json:"child_index,omitempty"`
- // The type of the link. For example, in Hash Joins this could be used to
- // distinguish between the build child and the probe child, or in the case
- // of the child being an output variable, to represent the tag associated
- // with the output variable.
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- // Only present if the child node is [SCALAR][google.spanner.v1.PlanNode.Kind.SCALAR] and corresponds
- // to an output variable of the parent node. The field carries the name of
- // the output variable.
- // For example, a `TableScan` operator that reads rows from a table will
- // have child links to the `SCALAR` nodes representing the output variables
- // created for each column that is read by the operator. The corresponding
- // `variable` fields will be set to the variable names assigned to the
- // columns.
- Variable string `protobuf:"bytes,3,opt,name=variable,proto3" json:"variable,omitempty"`
-}
-
-func (x *PlanNode_ChildLink) Reset() {
- *x = PlanNode_ChildLink{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PlanNode_ChildLink) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PlanNode_ChildLink) ProtoMessage() {}
-
-func (x *PlanNode_ChildLink) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PlanNode_ChildLink.ProtoReflect.Descriptor instead.
-func (*PlanNode_ChildLink) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_query_plan_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *PlanNode_ChildLink) GetChildIndex() int32 {
- if x != nil {
- return x.ChildIndex
- }
- return 0
-}
-
-func (x *PlanNode_ChildLink) GetType() string {
- if x != nil {
- return x.Type
- }
- return ""
-}
-
-func (x *PlanNode_ChildLink) GetVariable() string {
- if x != nil {
- return x.Variable
- }
- return ""
-}
-
-// Condensed representation of a node and its subtree. Only present for
-// `SCALAR` [PlanNode(s)][google.spanner.v1.PlanNode].
-type PlanNode_ShortRepresentation struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // A string representation of the expression subtree rooted at this node.
- Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
- // A mapping of (subquery variable name) -> (subquery node id) for cases
- // where the `description` string of this node references a `SCALAR`
- // subquery contained in the expression subtree rooted at this node. The
- // referenced `SCALAR` subquery may not necessarily be a direct child of
- // this node.
- Subqueries map[string]int32 `protobuf:"bytes,2,rep,name=subqueries,proto3" json:"subqueries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
-}
-
-func (x *PlanNode_ShortRepresentation) Reset() {
- *x = PlanNode_ShortRepresentation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PlanNode_ShortRepresentation) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PlanNode_ShortRepresentation) ProtoMessage() {}
-
-func (x *PlanNode_ShortRepresentation) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_query_plan_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PlanNode_ShortRepresentation.ProtoReflect.Descriptor instead.
-func (*PlanNode_ShortRepresentation) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_query_plan_proto_rawDescGZIP(), []int{0, 1}
-}
-
-func (x *PlanNode_ShortRepresentation) GetDescription() string {
- if x != nil {
- return x.Description
- }
- return ""
-}
-
-func (x *PlanNode_ShortRepresentation) GetSubqueries() map[string]int32 {
- if x != nil {
- return x.Subqueries
- }
- return nil
-}
-
-var File_google_spanner_v1_query_plan_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_query_plan_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x06, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x6e, 0x4e, 0x6f,
- 0x64, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x34, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x4e,
- 0x6f, 0x64, 0x65, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x21,
- 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d,
- 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73,
- 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x4e,
- 0x6f, 0x64, 0x65, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x0a, 0x63,
- 0x68, 0x69, 0x6c, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x62, 0x0a, 0x14, 0x73, 0x68, 0x6f,
- 0x72, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x6e,
- 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x70, 0x72, 0x65, 0x73,
- 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x52,
- 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a,
- 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x40, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74,
- 0x72, 0x75, 0x63, 0x74, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x73, 0x1a, 0x5c, 0x0a, 0x09, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x4c, 0x69, 0x6e,
- 0x6b, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x64,
- 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62,
- 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62,
- 0x6c, 0x65, 0x1a, 0xd7, 0x01, 0x0a, 0x13, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x70, 0x72,
- 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a,
- 0x73, 0x75, 0x62, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x68,
- 0x6f, 0x72, 0x74, 0x52, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x2e, 0x53, 0x75, 0x62, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
- 0x79, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x3d, 0x0a,
- 0x0f, 0x53, 0x75, 0x62, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
- 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x38, 0x0a, 0x04,
- 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x10, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53,
- 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45,
- 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x43,
- 0x41, 0x4c, 0x41, 0x52, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50,
- 0x6c, 0x61, 0x6e, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x6e,
- 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x42,
- 0xb1, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x50, 0x6c, 0x61, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
- 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x70, 0x62, 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x17, 0x47,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
- 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a,
- 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_query_plan_proto_rawDescOnce sync.Once
- file_google_spanner_v1_query_plan_proto_rawDescData = file_google_spanner_v1_query_plan_proto_rawDesc
-)
-
-func file_google_spanner_v1_query_plan_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_query_plan_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_query_plan_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_query_plan_proto_rawDescData)
- })
- return file_google_spanner_v1_query_plan_proto_rawDescData
-}
-
-var file_google_spanner_v1_query_plan_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_google_spanner_v1_query_plan_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
-var file_google_spanner_v1_query_plan_proto_goTypes = []any{
- (PlanNode_Kind)(0), // 0: google.spanner.v1.PlanNode.Kind
- (*PlanNode)(nil), // 1: google.spanner.v1.PlanNode
- (*QueryPlan)(nil), // 2: google.spanner.v1.QueryPlan
- (*PlanNode_ChildLink)(nil), // 3: google.spanner.v1.PlanNode.ChildLink
- (*PlanNode_ShortRepresentation)(nil), // 4: google.spanner.v1.PlanNode.ShortRepresentation
- nil, // 5: google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry
- (*structpb.Struct)(nil), // 6: google.protobuf.Struct
-}
-var file_google_spanner_v1_query_plan_proto_depIdxs = []int32{
- 0, // 0: google.spanner.v1.PlanNode.kind:type_name -> google.spanner.v1.PlanNode.Kind
- 3, // 1: google.spanner.v1.PlanNode.child_links:type_name -> google.spanner.v1.PlanNode.ChildLink
- 4, // 2: google.spanner.v1.PlanNode.short_representation:type_name -> google.spanner.v1.PlanNode.ShortRepresentation
- 6, // 3: google.spanner.v1.PlanNode.metadata:type_name -> google.protobuf.Struct
- 6, // 4: google.spanner.v1.PlanNode.execution_stats:type_name -> google.protobuf.Struct
- 1, // 5: google.spanner.v1.QueryPlan.plan_nodes:type_name -> google.spanner.v1.PlanNode
- 5, // 6: google.spanner.v1.PlanNode.ShortRepresentation.subqueries:type_name -> google.spanner.v1.PlanNode.ShortRepresentation.SubqueriesEntry
- 7, // [7:7] is the sub-list for method output_type
- 7, // [7:7] is the sub-list for method input_type
- 7, // [7:7] is the sub-list for extension type_name
- 7, // [7:7] is the sub-list for extension extendee
- 0, // [0:7] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_query_plan_proto_init() }
-func file_google_spanner_v1_query_plan_proto_init() {
- if File_google_spanner_v1_query_plan_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_query_plan_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*PlanNode); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_query_plan_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*QueryPlan); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_query_plan_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*PlanNode_ChildLink); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_query_plan_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*PlanNode_ShortRepresentation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_query_plan_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 5,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_query_plan_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_query_plan_proto_depIdxs,
- EnumInfos: file_google_spanner_v1_query_plan_proto_enumTypes,
- MessageInfos: file_google_spanner_v1_query_plan_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_query_plan_proto = out.File
- file_google_spanner_v1_query_plan_proto_rawDesc = nil
- file_google_spanner_v1_query_plan_proto_goTypes = nil
- file_google_spanner_v1_query_plan_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/result_set.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/result_set.pb.go
deleted file mode 100644
index 57b2cb401..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/result_set.pb.go
+++ /dev/null
@@ -1,698 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/result_set.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Results from [Read][google.spanner.v1.Spanner.Read] or
-// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
-type ResultSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Metadata about the result set, such as row type information.
- Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // Each element in `rows` is a row whose format is defined by
- // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. The ith element
- // in each row matches the ith field in
- // [metadata.row_type][google.spanner.v1.ResultSetMetadata.row_type]. Elements are
- // encoded based on type as described
- // [here][google.spanner.v1.TypeCode].
- Rows []*structpb.ListValue `protobuf:"bytes,2,rep,name=rows,proto3" json:"rows,omitempty"`
- // Query plan and execution statistics for the SQL statement that
- // produced this result set. These can be requested by setting
- // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
- // DML statements always produce stats containing the number of rows
- // modified, unless executed using the
- // [ExecuteSqlRequest.QueryMode.PLAN][google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN] [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
- // Other fields may or may not be populated, based on the
- // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode].
- Stats *ResultSetStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"`
-}
-
-func (x *ResultSet) Reset() {
- *x = ResultSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResultSet) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResultSet) ProtoMessage() {}
-
-func (x *ResultSet) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResultSet.ProtoReflect.Descriptor instead.
-func (*ResultSet) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_result_set_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *ResultSet) GetMetadata() *ResultSetMetadata {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-func (x *ResultSet) GetRows() []*structpb.ListValue {
- if x != nil {
- return x.Rows
- }
- return nil
-}
-
-func (x *ResultSet) GetStats() *ResultSetStats {
- if x != nil {
- return x.Stats
- }
- return nil
-}
-
-// Partial results from a streaming read or SQL query. Streaming reads and
-// SQL queries better tolerate large result sets, large rows, and large
-// values, but are a little trickier to consume.
-type PartialResultSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Metadata about the result set, such as row type information.
- // Only present in the first response.
- Metadata *ResultSetMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // A streamed result set consists of a stream of values, which might
- // be split into many `PartialResultSet` messages to accommodate
- // large rows and/or large values. Every N complete values defines a
- // row, where N is equal to the number of entries in
- // [metadata.row_type.fields][google.spanner.v1.StructType.fields].
- //
- // Most values are encoded based on type as described
- // [here][google.spanner.v1.TypeCode].
- //
- // It is possible that the last value in values is "chunked",
- // meaning that the rest of the value is sent in subsequent
- // `PartialResultSet`(s). This is denoted by the [chunked_value][google.spanner.v1.PartialResultSet.chunked_value]
- // field. Two or more chunked values can be merged to form a
- // complete value as follows:
- //
- // - `bool/number/null`: cannot be chunked
- // - `string`: concatenate the strings
- // - `list`: concatenate the lists. If the last element in a list is a
- // `string`, `list`, or `object`, merge it with the first element in
- // the next list by applying these rules recursively.
- // - `object`: concatenate the (field name, field value) pairs. If a
- // field name is duplicated, then apply these rules recursively
- // to merge the field values.
- //
- // Some examples of merging:
- //
- // # Strings are concatenated.
- // "foo", "bar" => "foobar"
- //
- // # Lists of non-strings are concatenated.
- // [2, 3], [4] => [2, 3, 4]
- //
- // # Lists are concatenated, but the last and first elements are merged
- // # because they are strings.
- // ["a", "b"], ["c", "d"] => ["a", "bc", "d"]
- //
- // # Lists are concatenated, but the last and first elements are merged
- // # because they are lists. Recursively, the last and first elements
- // # of the inner lists are merged because they are strings.
- // ["a", ["b", "c"]], [["d"], "e"] => ["a", ["b", "cd"], "e"]
- //
- // # Non-overlapping object fields are combined.
- // {"a": "1"}, {"b": "2"} => {"a": "1", "b": 2"}
- //
- // # Overlapping object fields are merged.
- // {"a": "1"}, {"a": "2"} => {"a": "12"}
- //
- // # Examples of merging objects containing lists of strings.
- // {"a": ["1"]}, {"a": ["2"]} => {"a": ["12"]}
- //
- // For a more complete example, suppose a streaming SQL query is
- // yielding a result set whose rows contain a single string
- // field. The following `PartialResultSet`s might be yielded:
- //
- // {
- // "metadata": { ... }
- // "values": ["Hello", "W"]
- // "chunked_value": true
- // "resume_token": "Af65..."
- // }
- // {
- // "values": ["orl"]
- // "chunked_value": true
- // "resume_token": "Bqp2..."
- // }
- // {
- // "values": ["d"]
- // "resume_token": "Zx1B..."
- // }
- //
- // This sequence of `PartialResultSet`s encodes two rows, one
- // containing the field value `"Hello"`, and a second containing the
- // field value `"World" = "W" + "orl" + "d"`.
- Values []*structpb.Value `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
- // If true, then the final value in [values][google.spanner.v1.PartialResultSet.values] is chunked, and must
- // be combined with more values from subsequent `PartialResultSet`s
- // to obtain a complete field value.
- ChunkedValue bool `protobuf:"varint,3,opt,name=chunked_value,json=chunkedValue,proto3" json:"chunked_value,omitempty"`
- // Streaming calls might be interrupted for a variety of reasons, such
- // as TCP connection loss. If this occurs, the stream of results can
- // be resumed by re-sending the original request and including
- // `resume_token`. Note that executing any other transaction in the
- // same session invalidates the token.
- ResumeToken []byte `protobuf:"bytes,4,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
- // Query plan and execution statistics for the statement that produced this
- // streaming result set. These can be requested by setting
- // [ExecuteSqlRequest.query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] and are sent
- // only once with the last response in the stream.
- // This field will also be present in the last response for DML
- // statements.
- Stats *ResultSetStats `protobuf:"bytes,5,opt,name=stats,proto3" json:"stats,omitempty"`
-}
-
-func (x *PartialResultSet) Reset() {
- *x = PartialResultSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartialResultSet) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartialResultSet) ProtoMessage() {}
-
-func (x *PartialResultSet) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartialResultSet.ProtoReflect.Descriptor instead.
-func (*PartialResultSet) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_result_set_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *PartialResultSet) GetMetadata() *ResultSetMetadata {
- if x != nil {
- return x.Metadata
- }
- return nil
-}
-
-func (x *PartialResultSet) GetValues() []*structpb.Value {
- if x != nil {
- return x.Values
- }
- return nil
-}
-
-func (x *PartialResultSet) GetChunkedValue() bool {
- if x != nil {
- return x.ChunkedValue
- }
- return false
-}
-
-func (x *PartialResultSet) GetResumeToken() []byte {
- if x != nil {
- return x.ResumeToken
- }
- return nil
-}
-
-func (x *PartialResultSet) GetStats() *ResultSetStats {
- if x != nil {
- return x.Stats
- }
- return nil
-}
-
-// Metadata about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
-type ResultSetMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Indicates the field names and types for the rows in the result
- // set. For example, a SQL query like `"SELECT UserId, UserName FROM
- // Users"` could return a `row_type` value like:
- //
- // "fields": [
- // { "name": "UserId", "type": { "code": "INT64" } },
- // { "name": "UserName", "type": { "code": "STRING" } },
- // ]
- RowType *StructType `protobuf:"bytes,1,opt,name=row_type,json=rowType,proto3" json:"row_type,omitempty"`
- // If the read or SQL query began a transaction as a side-effect, the
- // information about the new transaction is yielded here.
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
- // A SQL query can be parameterized. In PLAN mode, these parameters can be
- // undeclared. This indicates the field names and types for those undeclared
- // parameters in the SQL query. For example, a SQL query like `"SELECT * FROM
- // Users where UserId = @userId and UserName = @userName "` could return a
- // `undeclared_parameters` value like:
- //
- // "fields": [
- // { "name": "UserId", "type": { "code": "INT64" } },
- // { "name": "UserName", "type": { "code": "STRING" } },
- // ]
- UndeclaredParameters *StructType `protobuf:"bytes,3,opt,name=undeclared_parameters,json=undeclaredParameters,proto3" json:"undeclared_parameters,omitempty"`
-}
-
-func (x *ResultSetMetadata) Reset() {
- *x = ResultSetMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResultSetMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResultSetMetadata) ProtoMessage() {}
-
-func (x *ResultSetMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResultSetMetadata.ProtoReflect.Descriptor instead.
-func (*ResultSetMetadata) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_result_set_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ResultSetMetadata) GetRowType() *StructType {
- if x != nil {
- return x.RowType
- }
- return nil
-}
-
-func (x *ResultSetMetadata) GetTransaction() *Transaction {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-func (x *ResultSetMetadata) GetUndeclaredParameters() *StructType {
- if x != nil {
- return x.UndeclaredParameters
- }
- return nil
-}
-
-// Additional statistics about a [ResultSet][google.spanner.v1.ResultSet] or [PartialResultSet][google.spanner.v1.PartialResultSet].
-type ResultSetStats struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // [QueryPlan][google.spanner.v1.QueryPlan] for the query associated with this result.
- QueryPlan *QueryPlan `protobuf:"bytes,1,opt,name=query_plan,json=queryPlan,proto3" json:"query_plan,omitempty"`
- // Aggregated statistics from the execution of the query. Only present when
- // the query is profiled. For example, a query could return the statistics as
- // follows:
- //
- // {
- // "rows_returned": "3",
- // "elapsed_time": "1.22 secs",
- // "cpu_time": "1.19 secs"
- // }
- QueryStats *structpb.Struct `protobuf:"bytes,2,opt,name=query_stats,json=queryStats,proto3" json:"query_stats,omitempty"`
- // The number of rows modified by the DML statement.
- //
- // Types that are assignable to RowCount:
- //
- // *ResultSetStats_RowCountExact
- // *ResultSetStats_RowCountLowerBound
- RowCount isResultSetStats_RowCount `protobuf_oneof:"row_count"`
-}
-
-func (x *ResultSetStats) Reset() {
- *x = ResultSetStats{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResultSetStats) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResultSetStats) ProtoMessage() {}
-
-func (x *ResultSetStats) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_result_set_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResultSetStats.ProtoReflect.Descriptor instead.
-func (*ResultSetStats) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_result_set_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *ResultSetStats) GetQueryPlan() *QueryPlan {
- if x != nil {
- return x.QueryPlan
- }
- return nil
-}
-
-func (x *ResultSetStats) GetQueryStats() *structpb.Struct {
- if x != nil {
- return x.QueryStats
- }
- return nil
-}
-
-func (m *ResultSetStats) GetRowCount() isResultSetStats_RowCount {
- if m != nil {
- return m.RowCount
- }
- return nil
-}
-
-func (x *ResultSetStats) GetRowCountExact() int64 {
- if x, ok := x.GetRowCount().(*ResultSetStats_RowCountExact); ok {
- return x.RowCountExact
- }
- return 0
-}
-
-func (x *ResultSetStats) GetRowCountLowerBound() int64 {
- if x, ok := x.GetRowCount().(*ResultSetStats_RowCountLowerBound); ok {
- return x.RowCountLowerBound
- }
- return 0
-}
-
-type isResultSetStats_RowCount interface {
- isResultSetStats_RowCount()
-}
-
-type ResultSetStats_RowCountExact struct {
- // Standard DML returns an exact count of rows that were modified.
- RowCountExact int64 `protobuf:"varint,3,opt,name=row_count_exact,json=rowCountExact,proto3,oneof"`
-}
-
-type ResultSetStats_RowCountLowerBound struct {
- // Partitioned DML does not offer exactly-once semantics, so it
- // returns a lower bound of the rows modified.
- RowCountLowerBound int64 `protobuf:"varint,4,opt,name=row_count_lower_bound,json=rowCountLowerBound,proto3,oneof"`
-}
-
-func (*ResultSetStats_RowCountExact) isResultSetStats_RowCount() {}
-
-func (*ResultSetStats_RowCountLowerBound) isResultSetStats_RowCount() {}
-
-var File_google_spanner_v1_result_set_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_result_set_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x70,
- 0x6c, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76,
- 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a,
- 0x09, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x04,
- 0x72, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4c, 0x69, 0x73,
- 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x37, 0x0a, 0x05,
- 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05,
- 0x73, 0x74, 0x61, 0x74, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x10, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61,
- 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x12, 0x40, 0x0a, 0x08, 0x6d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x06,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d,
- 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x0c, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54,
- 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65,
- 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x22, 0xe3, 0x01,
- 0x0a, 0x11, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x08, 0x72, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
- 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x72, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a,
- 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x52, 0x0a, 0x15, 0x75, 0x6e, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x70, 0x61,
- 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x14, 0x75,
- 0x6e, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74,
- 0x65, 0x72, 0x73, 0x22, 0xf3, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65,
- 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x3b, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f,
- 0x70, 0x6c, 0x61, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50,
- 0x6c, 0x61, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63,
- 0x74, 0x52, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x28, 0x0a,
- 0x0f, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x61, 0x63, 0x74,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x77, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x45, 0x78, 0x61, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x15, 0x72, 0x6f, 0x77, 0x5f, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x12, 0x72, 0x6f, 0x77, 0x43, 0x6f, 0x75,
- 0x6e, 0x74, 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x42, 0x0b, 0x0a, 0x09,
- 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0xb4, 0x01, 0x0a, 0x15, 0x63, 0x6f,
- 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa,
- 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x53,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
- 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x56, 0x31,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_result_set_proto_rawDescOnce sync.Once
- file_google_spanner_v1_result_set_proto_rawDescData = file_google_spanner_v1_result_set_proto_rawDesc
-)
-
-func file_google_spanner_v1_result_set_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_result_set_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_result_set_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_result_set_proto_rawDescData)
- })
- return file_google_spanner_v1_result_set_proto_rawDescData
-}
-
-var file_google_spanner_v1_result_set_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_spanner_v1_result_set_proto_goTypes = []any{
- (*ResultSet)(nil), // 0: google.spanner.v1.ResultSet
- (*PartialResultSet)(nil), // 1: google.spanner.v1.PartialResultSet
- (*ResultSetMetadata)(nil), // 2: google.spanner.v1.ResultSetMetadata
- (*ResultSetStats)(nil), // 3: google.spanner.v1.ResultSetStats
- (*structpb.ListValue)(nil), // 4: google.protobuf.ListValue
- (*structpb.Value)(nil), // 5: google.protobuf.Value
- (*StructType)(nil), // 6: google.spanner.v1.StructType
- (*Transaction)(nil), // 7: google.spanner.v1.Transaction
- (*QueryPlan)(nil), // 8: google.spanner.v1.QueryPlan
- (*structpb.Struct)(nil), // 9: google.protobuf.Struct
-}
-var file_google_spanner_v1_result_set_proto_depIdxs = []int32{
- 2, // 0: google.spanner.v1.ResultSet.metadata:type_name -> google.spanner.v1.ResultSetMetadata
- 4, // 1: google.spanner.v1.ResultSet.rows:type_name -> google.protobuf.ListValue
- 3, // 2: google.spanner.v1.ResultSet.stats:type_name -> google.spanner.v1.ResultSetStats
- 2, // 3: google.spanner.v1.PartialResultSet.metadata:type_name -> google.spanner.v1.ResultSetMetadata
- 5, // 4: google.spanner.v1.PartialResultSet.values:type_name -> google.protobuf.Value
- 3, // 5: google.spanner.v1.PartialResultSet.stats:type_name -> google.spanner.v1.ResultSetStats
- 6, // 6: google.spanner.v1.ResultSetMetadata.row_type:type_name -> google.spanner.v1.StructType
- 7, // 7: google.spanner.v1.ResultSetMetadata.transaction:type_name -> google.spanner.v1.Transaction
- 6, // 8: google.spanner.v1.ResultSetMetadata.undeclared_parameters:type_name -> google.spanner.v1.StructType
- 8, // 9: google.spanner.v1.ResultSetStats.query_plan:type_name -> google.spanner.v1.QueryPlan
- 9, // 10: google.spanner.v1.ResultSetStats.query_stats:type_name -> google.protobuf.Struct
- 11, // [11:11] is the sub-list for method output_type
- 11, // [11:11] is the sub-list for method input_type
- 11, // [11:11] is the sub-list for extension type_name
- 11, // [11:11] is the sub-list for extension extendee
- 0, // [0:11] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_result_set_proto_init() }
-func file_google_spanner_v1_result_set_proto_init() {
- if File_google_spanner_v1_result_set_proto != nil {
- return
- }
- file_google_spanner_v1_query_plan_proto_init()
- file_google_spanner_v1_transaction_proto_init()
- file_google_spanner_v1_type_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_result_set_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*ResultSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_result_set_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*PartialResultSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_result_set_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*ResultSetMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_result_set_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*ResultSetStats); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_v1_result_set_proto_msgTypes[3].OneofWrappers = []any{
- (*ResultSetStats_RowCountExact)(nil),
- (*ResultSetStats_RowCountLowerBound)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_result_set_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 4,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_result_set_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_result_set_proto_depIdxs,
- MessageInfos: file_google_spanner_v1_result_set_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_result_set_proto = out.File
- file_google_spanner_v1_result_set_proto_rawDesc = nil
- file_google_spanner_v1_result_set_proto_goTypes = nil
- file_google_spanner_v1_result_set_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/spanner.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/spanner.pb.go
deleted file mode 100644
index 6831ecbc7..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/spanner.pb.go
+++ /dev/null
@@ -1,5303 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/spanner.proto
-
-package spannerpb
-
-import (
- context "context"
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- status "google.golang.org/genproto/googleapis/rpc/status"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status1 "google.golang.org/grpc/status"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- structpb "google.golang.org/protobuf/types/known/structpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The relative priority for requests. Note that priority is not applicable
-// for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
-//
-// The priority acts as a hint to the Cloud Spanner scheduler and does not
-// guarantee priority or order of execution. For example:
-//
-// - Some parts of a write operation always execute at `PRIORITY_HIGH`,
-// regardless of the specified priority. This may cause you to see an
-// increase in high priority workload even when executing a low priority
-// request. This can also potentially cause a priority inversion where a
-// lower priority request will be fulfilled ahead of a higher priority
-// request.
-// - If a transaction contains multiple operations with different priorities,
-// Cloud Spanner does not guarantee to process the higher priority
-// operations first. There may be other constraints to satisfy, such as
-// order of operations.
-type RequestOptions_Priority int32
-
-const (
- // `PRIORITY_UNSPECIFIED` is equivalent to `PRIORITY_HIGH`.
- RequestOptions_PRIORITY_UNSPECIFIED RequestOptions_Priority = 0
- // This specifies that the request is low priority.
- RequestOptions_PRIORITY_LOW RequestOptions_Priority = 1
- // This specifies that the request is medium priority.
- RequestOptions_PRIORITY_MEDIUM RequestOptions_Priority = 2
- // This specifies that the request is high priority.
- RequestOptions_PRIORITY_HIGH RequestOptions_Priority = 3
-)
-
-// Enum value maps for RequestOptions_Priority.
-var (
- RequestOptions_Priority_name = map[int32]string{
- 0: "PRIORITY_UNSPECIFIED",
- 1: "PRIORITY_LOW",
- 2: "PRIORITY_MEDIUM",
- 3: "PRIORITY_HIGH",
- }
- RequestOptions_Priority_value = map[string]int32{
- "PRIORITY_UNSPECIFIED": 0,
- "PRIORITY_LOW": 1,
- "PRIORITY_MEDIUM": 2,
- "PRIORITY_HIGH": 3,
- }
-)
-
-func (x RequestOptions_Priority) Enum() *RequestOptions_Priority {
- p := new(RequestOptions_Priority)
- *p = x
- return p
-}
-
-func (x RequestOptions_Priority) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (RequestOptions_Priority) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_spanner_proto_enumTypes[0].Descriptor()
-}
-
-func (RequestOptions_Priority) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_spanner_proto_enumTypes[0]
-}
-
-func (x RequestOptions_Priority) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use RequestOptions_Priority.Descriptor instead.
-func (RequestOptions_Priority) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{8, 0}
-}
-
-// Indicates the type of replica.
-type DirectedReadOptions_ReplicaSelection_Type int32
-
-const (
- // Not specified.
- DirectedReadOptions_ReplicaSelection_TYPE_UNSPECIFIED DirectedReadOptions_ReplicaSelection_Type = 0
- // Read-write replicas support both reads and writes.
- DirectedReadOptions_ReplicaSelection_READ_WRITE DirectedReadOptions_ReplicaSelection_Type = 1
- // Read-only replicas only support reads (not writes).
- DirectedReadOptions_ReplicaSelection_READ_ONLY DirectedReadOptions_ReplicaSelection_Type = 2
-)
-
-// Enum value maps for DirectedReadOptions_ReplicaSelection_Type.
-var (
- DirectedReadOptions_ReplicaSelection_Type_name = map[int32]string{
- 0: "TYPE_UNSPECIFIED",
- 1: "READ_WRITE",
- 2: "READ_ONLY",
- }
- DirectedReadOptions_ReplicaSelection_Type_value = map[string]int32{
- "TYPE_UNSPECIFIED": 0,
- "READ_WRITE": 1,
- "READ_ONLY": 2,
- }
-)
-
-func (x DirectedReadOptions_ReplicaSelection_Type) Enum() *DirectedReadOptions_ReplicaSelection_Type {
- p := new(DirectedReadOptions_ReplicaSelection_Type)
- *p = x
- return p
-}
-
-func (x DirectedReadOptions_ReplicaSelection_Type) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (DirectedReadOptions_ReplicaSelection_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_spanner_proto_enumTypes[1].Descriptor()
-}
-
-func (DirectedReadOptions_ReplicaSelection_Type) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_spanner_proto_enumTypes[1]
-}
-
-func (x DirectedReadOptions_ReplicaSelection_Type) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use DirectedReadOptions_ReplicaSelection_Type.Descriptor instead.
-func (DirectedReadOptions_ReplicaSelection_Type) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{9, 0, 0}
-}
-
-// Mode in which the statement must be processed.
-type ExecuteSqlRequest_QueryMode int32
-
-const (
- // The default mode. Only the statement results are returned.
- ExecuteSqlRequest_NORMAL ExecuteSqlRequest_QueryMode = 0
- // This mode returns only the query plan, without any results or
- // execution statistics information.
- ExecuteSqlRequest_PLAN ExecuteSqlRequest_QueryMode = 1
- // This mode returns both the query plan and the execution statistics along
- // with the results.
- ExecuteSqlRequest_PROFILE ExecuteSqlRequest_QueryMode = 2
-)
-
-// Enum value maps for ExecuteSqlRequest_QueryMode.
-var (
- ExecuteSqlRequest_QueryMode_name = map[int32]string{
- 0: "NORMAL",
- 1: "PLAN",
- 2: "PROFILE",
- }
- ExecuteSqlRequest_QueryMode_value = map[string]int32{
- "NORMAL": 0,
- "PLAN": 1,
- "PROFILE": 2,
- }
-)
-
-func (x ExecuteSqlRequest_QueryMode) Enum() *ExecuteSqlRequest_QueryMode {
- p := new(ExecuteSqlRequest_QueryMode)
- *p = x
- return p
-}
-
-func (x ExecuteSqlRequest_QueryMode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ExecuteSqlRequest_QueryMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_spanner_proto_enumTypes[2].Descriptor()
-}
-
-func (ExecuteSqlRequest_QueryMode) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_spanner_proto_enumTypes[2]
-}
-
-func (x ExecuteSqlRequest_QueryMode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ExecuteSqlRequest_QueryMode.Descriptor instead.
-func (ExecuteSqlRequest_QueryMode) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{10, 0}
-}
-
-// An option to control the order in which rows are returned from a read.
-type ReadRequest_OrderBy int32
-
-const (
- // Default value.
- //
- // ORDER_BY_UNSPECIFIED is equivalent to ORDER_BY_PRIMARY_KEY.
- ReadRequest_ORDER_BY_UNSPECIFIED ReadRequest_OrderBy = 0
- // Read rows are returned in primary key order.
- //
- // In the event that this option is used in conjunction with the
- // `partition_token` field, the API will return an `INVALID_ARGUMENT` error.
- ReadRequest_ORDER_BY_PRIMARY_KEY ReadRequest_OrderBy = 1
- // Read rows are returned in any order.
- ReadRequest_ORDER_BY_NO_ORDER ReadRequest_OrderBy = 2
-)
-
-// Enum value maps for ReadRequest_OrderBy.
-var (
- ReadRequest_OrderBy_name = map[int32]string{
- 0: "ORDER_BY_UNSPECIFIED",
- 1: "ORDER_BY_PRIMARY_KEY",
- 2: "ORDER_BY_NO_ORDER",
- }
- ReadRequest_OrderBy_value = map[string]int32{
- "ORDER_BY_UNSPECIFIED": 0,
- "ORDER_BY_PRIMARY_KEY": 1,
- "ORDER_BY_NO_ORDER": 2,
- }
-)
-
-func (x ReadRequest_OrderBy) Enum() *ReadRequest_OrderBy {
- p := new(ReadRequest_OrderBy)
- *p = x
- return p
-}
-
-func (x ReadRequest_OrderBy) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ReadRequest_OrderBy) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_spanner_proto_enumTypes[3].Descriptor()
-}
-
-func (ReadRequest_OrderBy) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_spanner_proto_enumTypes[3]
-}
-
-func (x ReadRequest_OrderBy) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ReadRequest_OrderBy.Descriptor instead.
-func (ReadRequest_OrderBy) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{18, 0}
-}
-
-// A lock hint mechanism for reads done within a transaction.
-type ReadRequest_LockHint int32
-
-const (
- // Default value.
- //
- // LOCK_HINT_UNSPECIFIED is equivalent to LOCK_HINT_SHARED.
- ReadRequest_LOCK_HINT_UNSPECIFIED ReadRequest_LockHint = 0
- // Acquire shared locks.
- //
- // By default when you perform a read as part of a read-write transaction,
- // Spanner acquires shared read locks, which allows other reads to still
- // access the data until your transaction is ready to commit. When your
- // transaction is committing and writes are being applied, the transaction
- // attempts to upgrade to an exclusive lock for any data you are writing.
- // For more information about locks, see [Lock
- // modes](https://cloud.google.com/spanner/docs/introspection/lock-statistics#explain-lock-modes).
- ReadRequest_LOCK_HINT_SHARED ReadRequest_LockHint = 1
- // Acquire exclusive locks.
- //
- // Requesting exclusive locks is beneficial if you observe high write
- // contention, which means you notice that multiple transactions are
- // concurrently trying to read and write to the same data, resulting in a
- // large number of aborts. This problem occurs when two transactions
- // initially acquire shared locks and then both try to upgrade to exclusive
- // locks at the same time. In this situation both transactions are waiting
- // for the other to give up their lock, resulting in a deadlocked situation.
- // Spanner is able to detect this occurring and force one of the
- // transactions to abort. However, this is a slow and expensive operation
- // and results in lower performance. In this case it makes sense to acquire
- // exclusive locks at the start of the transaction because then when
- // multiple transactions try to act on the same data, they automatically get
- // serialized. Each transaction waits its turn to acquire the lock and
- // avoids getting into deadlock situations.
- //
- // Because the exclusive lock hint is just a hint, it should not be
- // considered equivalent to a mutex. In other words, you should not use
- // Spanner exclusive locks as a mutual exclusion mechanism for the execution
- // of code outside of Spanner.
- //
- // **Note:** Request exclusive locks judiciously because they block others
- // from reading that data for the entire transaction, rather than just when
- // the writes are being performed. Unless you observe high write contention,
- // you should use the default of shared read locks so you don't prematurely
- // block other clients from reading the data that you're writing to.
- ReadRequest_LOCK_HINT_EXCLUSIVE ReadRequest_LockHint = 2
-)
-
-// Enum value maps for ReadRequest_LockHint.
-var (
- ReadRequest_LockHint_name = map[int32]string{
- 0: "LOCK_HINT_UNSPECIFIED",
- 1: "LOCK_HINT_SHARED",
- 2: "LOCK_HINT_EXCLUSIVE",
- }
- ReadRequest_LockHint_value = map[string]int32{
- "LOCK_HINT_UNSPECIFIED": 0,
- "LOCK_HINT_SHARED": 1,
- "LOCK_HINT_EXCLUSIVE": 2,
- }
-)
-
-func (x ReadRequest_LockHint) Enum() *ReadRequest_LockHint {
- p := new(ReadRequest_LockHint)
- *p = x
- return p
-}
-
-func (x ReadRequest_LockHint) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ReadRequest_LockHint) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_spanner_proto_enumTypes[4].Descriptor()
-}
-
-func (ReadRequest_LockHint) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_spanner_proto_enumTypes[4]
-}
-
-func (x ReadRequest_LockHint) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ReadRequest_LockHint.Descriptor instead.
-func (ReadRequest_LockHint) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{18, 1}
-}
-
-// The request for [CreateSession][google.spanner.v1.Spanner.CreateSession].
-type CreateSessionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database in which the new session is created.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
- // Required. The session to create.
- Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"`
-}
-
-func (x *CreateSessionRequest) Reset() {
- *x = CreateSessionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateSessionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateSessionRequest) ProtoMessage() {}
-
-func (x *CreateSessionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateSessionRequest.ProtoReflect.Descriptor instead.
-func (*CreateSessionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *CreateSessionRequest) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *CreateSessionRequest) GetSession() *Session {
- if x != nil {
- return x.Session
- }
- return nil
-}
-
-// The request for
-// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
-type BatchCreateSessionsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database in which the new sessions are created.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
- // Parameters to be applied to each created session.
- SessionTemplate *Session `protobuf:"bytes,2,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
- // Required. The number of sessions to be created in this batch call.
- // The API may return fewer than the requested number of sessions. If a
- // specific number of sessions are desired, the client can make additional
- // calls to BatchCreateSessions (adjusting
- // [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
- // as necessary).
- SessionCount int32 `protobuf:"varint,3,opt,name=session_count,json=sessionCount,proto3" json:"session_count,omitempty"`
-}
-
-func (x *BatchCreateSessionsRequest) Reset() {
- *x = BatchCreateSessionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BatchCreateSessionsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BatchCreateSessionsRequest) ProtoMessage() {}
-
-func (x *BatchCreateSessionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BatchCreateSessionsRequest.ProtoReflect.Descriptor instead.
-func (*BatchCreateSessionsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *BatchCreateSessionsRequest) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *BatchCreateSessionsRequest) GetSessionTemplate() *Session {
- if x != nil {
- return x.SessionTemplate
- }
- return nil
-}
-
-func (x *BatchCreateSessionsRequest) GetSessionCount() int32 {
- if x != nil {
- return x.SessionCount
- }
- return 0
-}
-
-// The response for
-// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
-type BatchCreateSessionsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The freshly created sessions.
- Session []*Session `protobuf:"bytes,1,rep,name=session,proto3" json:"session,omitempty"`
-}
-
-func (x *BatchCreateSessionsResponse) Reset() {
- *x = BatchCreateSessionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BatchCreateSessionsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BatchCreateSessionsResponse) ProtoMessage() {}
-
-func (x *BatchCreateSessionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BatchCreateSessionsResponse.ProtoReflect.Descriptor instead.
-func (*BatchCreateSessionsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *BatchCreateSessionsResponse) GetSession() []*Session {
- if x != nil {
- return x.Session
- }
- return nil
-}
-
-// A session in the Cloud Spanner API.
-type Session struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Output only. The name of the session. This is always system-assigned.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The labels for the session.
- //
- // - Label keys must be between 1 and 63 characters long and must conform to
- // the following regular expression: `[a-z]([-a-z0-9]*[a-z0-9])?`.
- // - Label values must be between 0 and 63 characters long and must conform
- // to the regular expression `([a-z]([-a-z0-9]*[a-z0-9])?)?`.
- // - No more than 64 labels can be associated with a given session.
- //
- // See https://goo.gl/xmQnxf for more information on and examples of labels.
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // Output only. The timestamp when the session is created.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The approximate timestamp when the session is last used. It is
- // typically earlier than the actual last use time.
- ApproximateLastUseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=approximate_last_use_time,json=approximateLastUseTime,proto3" json:"approximate_last_use_time,omitempty"`
- // The database role which created this session.
- CreatorRole string `protobuf:"bytes,5,opt,name=creator_role,json=creatorRole,proto3" json:"creator_role,omitempty"`
- // Optional. If true, specifies a multiplexed session. A multiplexed session
- // may be used for multiple, concurrent read-only operations but can not be
- // used for read-write transactions, partitioned reads, or partitioned
- // queries. Multiplexed sessions can be created via
- // [CreateSession][google.spanner.v1.Spanner.CreateSession] but not via
- // [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].
- // Multiplexed sessions may not be deleted nor listed.
- Multiplexed bool `protobuf:"varint,6,opt,name=multiplexed,proto3" json:"multiplexed,omitempty"`
-}
-
-func (x *Session) Reset() {
- *x = Session{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Session) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Session) ProtoMessage() {}
-
-func (x *Session) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Session.ProtoReflect.Descriptor instead.
-func (*Session) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *Session) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Session) GetLabels() map[string]string {
- if x != nil {
- return x.Labels
- }
- return nil
-}
-
-func (x *Session) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *Session) GetApproximateLastUseTime() *timestamppb.Timestamp {
- if x != nil {
- return x.ApproximateLastUseTime
- }
- return nil
-}
-
-func (x *Session) GetCreatorRole() string {
- if x != nil {
- return x.CreatorRole
- }
- return ""
-}
-
-func (x *Session) GetMultiplexed() bool {
- if x != nil {
- return x.Multiplexed
- }
- return false
-}
-
-// The request for [GetSession][google.spanner.v1.Spanner.GetSession].
-type GetSessionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the session to retrieve.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetSessionRequest) Reset() {
- *x = GetSessionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetSessionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetSessionRequest) ProtoMessage() {}
-
-func (x *GetSessionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetSessionRequest.ProtoReflect.Descriptor instead.
-func (*GetSessionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *GetSessionRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// The request for [ListSessions][google.spanner.v1.Spanner.ListSessions].
-type ListSessionsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The database in which to list sessions.
- Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"`
- // Number of sessions to be returned in the response. If 0 or less, defaults
- // to the server's maximum allowed page size.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // If non-empty, `page_token` should contain a
- // [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
- // from a previous
- // [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // An expression for filtering the results of the request. Filter rules are
- // case insensitive. The fields eligible for filtering are:
- //
- // - `labels.key` where key is the name of a label
- //
- // Some examples of using filters are:
- //
- // - `labels.env:*` --> The session has the label "env".
- // - `labels.env:dev` --> The session has the label "env" and the value of
- // the label contains the string "dev".
- Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
-}
-
-func (x *ListSessionsRequest) Reset() {
- *x = ListSessionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListSessionsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListSessionsRequest) ProtoMessage() {}
-
-func (x *ListSessionsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListSessionsRequest.ProtoReflect.Descriptor instead.
-func (*ListSessionsRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *ListSessionsRequest) GetDatabase() string {
- if x != nil {
- return x.Database
- }
- return ""
-}
-
-func (x *ListSessionsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListSessionsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-func (x *ListSessionsRequest) GetFilter() string {
- if x != nil {
- return x.Filter
- }
- return ""
-}
-
-// The response for [ListSessions][google.spanner.v1.Spanner.ListSessions].
-type ListSessionsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of requested sessions.
- Sessions []*Session `protobuf:"bytes,1,rep,name=sessions,proto3" json:"sessions,omitempty"`
- // `next_page_token` can be sent in a subsequent
- // [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more
- // of the matching sessions.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListSessionsResponse) Reset() {
- *x = ListSessionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListSessionsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListSessionsResponse) ProtoMessage() {}
-
-func (x *ListSessionsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListSessionsResponse.ProtoReflect.Descriptor instead.
-func (*ListSessionsResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *ListSessionsResponse) GetSessions() []*Session {
- if x != nil {
- return x.Sessions
- }
- return nil
-}
-
-func (x *ListSessionsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
-// The request for [DeleteSession][google.spanner.v1.Spanner.DeleteSession].
-type DeleteSessionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The name of the session to delete.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteSessionRequest) Reset() {
- *x = DeleteSessionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteSessionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteSessionRequest) ProtoMessage() {}
-
-func (x *DeleteSessionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteSessionRequest.ProtoReflect.Descriptor instead.
-func (*DeleteSessionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *DeleteSessionRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Common request options for various APIs.
-type RequestOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Priority for the request.
- Priority RequestOptions_Priority `protobuf:"varint,1,opt,name=priority,proto3,enum=google.spanner.v1.RequestOptions_Priority" json:"priority,omitempty"`
- // A per-request tag which can be applied to queries or reads, used for
- // statistics collection.
- // Both request_tag and transaction_tag can be specified for a read or query
- // that belongs to a transaction.
- // This field is ignored for requests where it's not applicable (e.g.
- // CommitRequest).
- // Legal characters for `request_tag` values are all printable characters
- // (ASCII 32 - 126) and the length of a request_tag is limited to 50
- // characters. Values that exceed this limit are truncated.
- // Any leading underscore (_) characters will be removed from the string.
- RequestTag string `protobuf:"bytes,2,opt,name=request_tag,json=requestTag,proto3" json:"request_tag,omitempty"`
- // A tag used for statistics collection about this transaction.
- // Both request_tag and transaction_tag can be specified for a read or query
- // that belongs to a transaction.
- // The value of transaction_tag should be the same for all requests belonging
- // to the same transaction.
- // If this request doesn't belong to any transaction, transaction_tag will be
- // ignored.
- // Legal characters for `transaction_tag` values are all printable characters
- // (ASCII 32 - 126) and the length of a transaction_tag is limited to 50
- // characters. Values that exceed this limit are truncated.
- // Any leading underscore (_) characters will be removed from the string.
- TransactionTag string `protobuf:"bytes,3,opt,name=transaction_tag,json=transactionTag,proto3" json:"transaction_tag,omitempty"`
-}
-
-func (x *RequestOptions) Reset() {
- *x = RequestOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RequestOptions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RequestOptions) ProtoMessage() {}
-
-func (x *RequestOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RequestOptions.ProtoReflect.Descriptor instead.
-func (*RequestOptions) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *RequestOptions) GetPriority() RequestOptions_Priority {
- if x != nil {
- return x.Priority
- }
- return RequestOptions_PRIORITY_UNSPECIFIED
-}
-
-func (x *RequestOptions) GetRequestTag() string {
- if x != nil {
- return x.RequestTag
- }
- return ""
-}
-
-func (x *RequestOptions) GetTransactionTag() string {
- if x != nil {
- return x.TransactionTag
- }
- return ""
-}
-
-// The DirectedReadOptions can be used to indicate which replicas or regions
-// should be used for non-transactional reads or queries.
-//
-// DirectedReadOptions may only be specified for a read-only transaction,
-// otherwise the API will return an `INVALID_ARGUMENT` error.
-type DirectedReadOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. At most one of either include_replicas or exclude_replicas
- // should be present in the message.
- //
- // Types that are assignable to Replicas:
- //
- // *DirectedReadOptions_IncludeReplicas_
- // *DirectedReadOptions_ExcludeReplicas_
- Replicas isDirectedReadOptions_Replicas `protobuf_oneof:"replicas"`
-}
-
-func (x *DirectedReadOptions) Reset() {
- *x = DirectedReadOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DirectedReadOptions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DirectedReadOptions) ProtoMessage() {}
-
-func (x *DirectedReadOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DirectedReadOptions.ProtoReflect.Descriptor instead.
-func (*DirectedReadOptions) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{9}
-}
-
-func (m *DirectedReadOptions) GetReplicas() isDirectedReadOptions_Replicas {
- if m != nil {
- return m.Replicas
- }
- return nil
-}
-
-func (x *DirectedReadOptions) GetIncludeReplicas() *DirectedReadOptions_IncludeReplicas {
- if x, ok := x.GetReplicas().(*DirectedReadOptions_IncludeReplicas_); ok {
- return x.IncludeReplicas
- }
- return nil
-}
-
-func (x *DirectedReadOptions) GetExcludeReplicas() *DirectedReadOptions_ExcludeReplicas {
- if x, ok := x.GetReplicas().(*DirectedReadOptions_ExcludeReplicas_); ok {
- return x.ExcludeReplicas
- }
- return nil
-}
-
-type isDirectedReadOptions_Replicas interface {
- isDirectedReadOptions_Replicas()
-}
-
-type DirectedReadOptions_IncludeReplicas_ struct {
- // Include_replicas indicates the order of replicas (as they appear in
- // this list) to process the request. If auto_failover_disabled is set to
- // true and all replicas are exhausted without finding a healthy replica,
- // Spanner will wait for a replica in the list to become available, requests
- // may fail due to `DEADLINE_EXCEEDED` errors.
- IncludeReplicas *DirectedReadOptions_IncludeReplicas `protobuf:"bytes,1,opt,name=include_replicas,json=includeReplicas,proto3,oneof"`
-}
-
-type DirectedReadOptions_ExcludeReplicas_ struct {
- // Exclude_replicas indicates that specified replicas should be excluded
- // from serving requests. Spanner will not route requests to the replicas
- // in this list.
- ExcludeReplicas *DirectedReadOptions_ExcludeReplicas `protobuf:"bytes,2,opt,name=exclude_replicas,json=excludeReplicas,proto3,oneof"`
-}
-
-func (*DirectedReadOptions_IncludeReplicas_) isDirectedReadOptions_Replicas() {}
-
-func (*DirectedReadOptions_ExcludeReplicas_) isDirectedReadOptions_Replicas() {}
-
-// The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
-// [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
-type ExecuteSqlRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the SQL query should be performed.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // The transaction to use.
- //
- // For queries, if none is provided, the default is a temporary read-only
- // transaction with strong concurrency.
- //
- // Standard DML statements require a read-write transaction. To protect
- // against replays, single-use transactions are not supported. The caller
- // must either supply an existing transaction ID or begin a new transaction.
- //
- // Partitioned DML requires an existing Partitioned DML transaction ID.
- Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
- // Required. The SQL string.
- Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
- // Parameter names and values that bind to placeholders in the SQL string.
- //
- // A parameter placeholder consists of the `@` character followed by the
- // parameter name (for example, `@firstName`). Parameter names must conform
- // to the naming requirements of identifiers as specified at
- // https://cloud.google.com/spanner/docs/lexical#identifiers.
- //
- // Parameters can appear anywhere that a literal value is expected. The same
- // parameter name can be used more than once, for example:
- //
- // `"WHERE id > @msg_id AND id < @msg_id + 100"`
- //
- // It is an error to execute a SQL statement with unbound parameters.
- Params *structpb.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
- // It is not always possible for Cloud Spanner to infer the right SQL type
- // from a JSON value. For example, values of type `BYTES` and values
- // of type `STRING` both appear in
- // [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings.
- //
- // In these cases, `param_types` can be used to specify the exact
- // SQL type for some or all of the SQL statement parameters. See the
- // definition of [Type][google.spanner.v1.Type] for more information
- // about SQL types.
- ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // If this request is resuming a previously interrupted SQL statement
- // execution, `resume_token` should be copied from the last
- // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
- // interruption. Doing this enables the new SQL statement execution to resume
- // where the last one left off. The rest of the request parameters must
- // exactly match the request that yielded this token.
- ResumeToken []byte `protobuf:"bytes,6,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
- // Used to control the amount of debugging information returned in
- // [ResultSetStats][google.spanner.v1.ResultSetStats]. If
- // [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is
- // set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only
- // be set to
- // [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
- QueryMode ExecuteSqlRequest_QueryMode `protobuf:"varint,7,opt,name=query_mode,json=queryMode,proto3,enum=google.spanner.v1.ExecuteSqlRequest_QueryMode" json:"query_mode,omitempty"`
- // If present, results will be restricted to the specified partition
- // previously created using PartitionQuery(). There must be an exact
- // match for the values of fields common to this message and the
- // PartitionQueryRequest message used to create this partition_token.
- PartitionToken []byte `protobuf:"bytes,8,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
- // A per-transaction sequence number used to identify this request. This field
- // makes each request idempotent such that if the request is received multiple
- // times, at most one will succeed.
- //
- // The sequence number must be monotonically increasing within the
- // transaction. If a request arrives for the first time with an out-of-order
- // sequence number, the transaction may be aborted. Replays of previously
- // handled requests will yield the same response as the first execution.
- //
- // Required for DML statements. Ignored for queries.
- Seqno int64 `protobuf:"varint,9,opt,name=seqno,proto3" json:"seqno,omitempty"`
- // Query optimizer configuration to use for the given query.
- QueryOptions *ExecuteSqlRequest_QueryOptions `protobuf:"bytes,10,opt,name=query_options,json=queryOptions,proto3" json:"query_options,omitempty"`
- // Common options for this request.
- RequestOptions *RequestOptions `protobuf:"bytes,11,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
- // Directed read options for this request.
- DirectedReadOptions *DirectedReadOptions `protobuf:"bytes,15,opt,name=directed_read_options,json=directedReadOptions,proto3" json:"directed_read_options,omitempty"`
- // If this is for a partitioned query and this field is set to `true`, the
- // request is executed with Spanner Data Boost independent compute resources.
- //
- // If the field is set to `true` but the request does not set
- // `partition_token`, the API returns an `INVALID_ARGUMENT` error.
- DataBoostEnabled bool `protobuf:"varint,16,opt,name=data_boost_enabled,json=dataBoostEnabled,proto3" json:"data_boost_enabled,omitempty"`
-}
-
-func (x *ExecuteSqlRequest) Reset() {
- *x = ExecuteSqlRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ExecuteSqlRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ExecuteSqlRequest) ProtoMessage() {}
-
-func (x *ExecuteSqlRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ExecuteSqlRequest.ProtoReflect.Descriptor instead.
-func (*ExecuteSqlRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *ExecuteSqlRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *ExecuteSqlRequest) GetTransaction() *TransactionSelector {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetSql() string {
- if x != nil {
- return x.Sql
- }
- return ""
-}
-
-func (x *ExecuteSqlRequest) GetParams() *structpb.Struct {
- if x != nil {
- return x.Params
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetParamTypes() map[string]*Type {
- if x != nil {
- return x.ParamTypes
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetResumeToken() []byte {
- if x != nil {
- return x.ResumeToken
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetQueryMode() ExecuteSqlRequest_QueryMode {
- if x != nil {
- return x.QueryMode
- }
- return ExecuteSqlRequest_NORMAL
-}
-
-func (x *ExecuteSqlRequest) GetPartitionToken() []byte {
- if x != nil {
- return x.PartitionToken
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetSeqno() int64 {
- if x != nil {
- return x.Seqno
- }
- return 0
-}
-
-func (x *ExecuteSqlRequest) GetQueryOptions() *ExecuteSqlRequest_QueryOptions {
- if x != nil {
- return x.QueryOptions
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetRequestOptions() *RequestOptions {
- if x != nil {
- return x.RequestOptions
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetDirectedReadOptions() *DirectedReadOptions {
- if x != nil {
- return x.DirectedReadOptions
- }
- return nil
-}
-
-func (x *ExecuteSqlRequest) GetDataBoostEnabled() bool {
- if x != nil {
- return x.DataBoostEnabled
- }
- return false
-}
-
-// The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
-type ExecuteBatchDmlRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the DML statements should be performed.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Required. The transaction to use. Must be a read-write transaction.
- //
- // To protect against replays, single-use transactions are not supported. The
- // caller must either supply an existing transaction ID or begin a new
- // transaction.
- Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
- // Required. The list of statements to execute in this batch. Statements are
- // executed serially, such that the effects of statement `i` are visible to
- // statement `i+1`. Each statement must be a DML statement. Execution stops at
- // the first failed statement; the remaining statements are not executed.
- //
- // Callers must provide at least one statement.
- Statements []*ExecuteBatchDmlRequest_Statement `protobuf:"bytes,3,rep,name=statements,proto3" json:"statements,omitempty"`
- // Required. A per-transaction sequence number used to identify this request.
- // This field makes each request idempotent such that if the request is
- // received multiple times, at most one will succeed.
- //
- // The sequence number must be monotonically increasing within the
- // transaction. If a request arrives for the first time with an out-of-order
- // sequence number, the transaction may be aborted. Replays of previously
- // handled requests will yield the same response as the first execution.
- Seqno int64 `protobuf:"varint,4,opt,name=seqno,proto3" json:"seqno,omitempty"`
- // Common options for this request.
- RequestOptions *RequestOptions `protobuf:"bytes,5,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
-}
-
-func (x *ExecuteBatchDmlRequest) Reset() {
- *x = ExecuteBatchDmlRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ExecuteBatchDmlRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ExecuteBatchDmlRequest) ProtoMessage() {}
-
-func (x *ExecuteBatchDmlRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ExecuteBatchDmlRequest.ProtoReflect.Descriptor instead.
-func (*ExecuteBatchDmlRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *ExecuteBatchDmlRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *ExecuteBatchDmlRequest) GetTransaction() *TransactionSelector {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-func (x *ExecuteBatchDmlRequest) GetStatements() []*ExecuteBatchDmlRequest_Statement {
- if x != nil {
- return x.Statements
- }
- return nil
-}
-
-func (x *ExecuteBatchDmlRequest) GetSeqno() int64 {
- if x != nil {
- return x.Seqno
- }
- return 0
-}
-
-func (x *ExecuteBatchDmlRequest) GetRequestOptions() *RequestOptions {
- if x != nil {
- return x.RequestOptions
- }
- return nil
-}
-
-// The response for
-// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list
-// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML
-// statement that has successfully executed, in the same order as the statements
-// in the request. If a statement fails, the status in the response body
-// identifies the cause of the failure.
-//
-// To check for DML statements that failed, use the following approach:
-//
-// 1. Check the status in the response message. The
-// [google.rpc.Code][google.rpc.Code] enum
-//
-// value `OK` indicates that all statements were executed successfully.
-// 2. If the status was not `OK`, check the number of result sets in the
-// response. If the response contains `N`
-// [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in
-// the request failed.
-//
-// Example 1:
-//
-// * Request: 5 DML statements, all executed successfully.
-// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the
-// status `OK`.
-//
-// Example 2:
-//
-// * Request: 5 DML statements. The third statement has a syntax error.
-// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax
-// error (`INVALID_ARGUMENT`)
-//
-// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages
-// indicates that the third statement failed, and the fourth and fifth
-// statements were not executed.
-type ExecuteBatchDmlResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // One [ResultSet][google.spanner.v1.ResultSet] for each statement in the
- // request that ran successfully, in the same order as the statements in the
- // request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any
- // rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each
- // [ResultSet][google.spanner.v1.ResultSet] contain the number of rows
- // modified by the statement.
- //
- // Only the first [ResultSet][google.spanner.v1.ResultSet] in the response
- // contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
- ResultSets []*ResultSet `protobuf:"bytes,1,rep,name=result_sets,json=resultSets,proto3" json:"result_sets,omitempty"`
- // If all DML statements are executed successfully, the status is `OK`.
- // Otherwise, the error status of the first failed statement.
- Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
-}
-
-func (x *ExecuteBatchDmlResponse) Reset() {
- *x = ExecuteBatchDmlResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ExecuteBatchDmlResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ExecuteBatchDmlResponse) ProtoMessage() {}
-
-func (x *ExecuteBatchDmlResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ExecuteBatchDmlResponse.ProtoReflect.Descriptor instead.
-func (*ExecuteBatchDmlResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *ExecuteBatchDmlResponse) GetResultSets() []*ResultSet {
- if x != nil {
- return x.ResultSets
- }
- return nil
-}
-
-func (x *ExecuteBatchDmlResponse) GetStatus() *status.Status {
- if x != nil {
- return x.Status
- }
- return nil
-}
-
-// Options for a PartitionQueryRequest and
-// PartitionReadRequest.
-type PartitionOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // **Note:** This hint is currently ignored by PartitionQuery and
- // PartitionRead requests.
- //
- // The desired data size for each partition generated. The default for this
- // option is currently 1 GiB. This is only a hint. The actual size of each
- // partition may be smaller or larger than this size request.
- PartitionSizeBytes int64 `protobuf:"varint,1,opt,name=partition_size_bytes,json=partitionSizeBytes,proto3" json:"partition_size_bytes,omitempty"`
- // **Note:** This hint is currently ignored by PartitionQuery and
- // PartitionRead requests.
- //
- // The desired maximum number of partitions to return. For example, this may
- // be set to the number of workers available. The default for this option
- // is currently 10,000. The maximum value is currently 200,000. This is only
- // a hint. The actual number of partitions returned may be smaller or larger
- // than this maximum count request.
- MaxPartitions int64 `protobuf:"varint,2,opt,name=max_partitions,json=maxPartitions,proto3" json:"max_partitions,omitempty"`
-}
-
-func (x *PartitionOptions) Reset() {
- *x = PartitionOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartitionOptions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartitionOptions) ProtoMessage() {}
-
-func (x *PartitionOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartitionOptions.ProtoReflect.Descriptor instead.
-func (*PartitionOptions) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *PartitionOptions) GetPartitionSizeBytes() int64 {
- if x != nil {
- return x.PartitionSizeBytes
- }
- return 0
-}
-
-func (x *PartitionOptions) GetMaxPartitions() int64 {
- if x != nil {
- return x.MaxPartitions
- }
- return 0
-}
-
-// The request for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
-type PartitionQueryRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session used to create the partitions.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Read only snapshot transactions are supported, read/write and single use
- // transactions are not.
- Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
- // Required. The query request to generate partitions for. The request will
- // fail if the query is not root partitionable. For a query to be root
- // partitionable, it needs to satisfy a few conditions. For example, if the
- // query execution plan contains a distributed union operator, then it must be
- // the first operator in the plan. For more information about other
- // conditions, see [Read data in
- // parallel](https://cloud.google.com/spanner/docs/reads#read_data_in_parallel).
- //
- // The query request must not contain DML commands, such as INSERT, UPDATE, or
- // DELETE. Use
- // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a
- // PartitionedDml transaction for large, partition-friendly DML operations.
- Sql string `protobuf:"bytes,3,opt,name=sql,proto3" json:"sql,omitempty"`
- // Parameter names and values that bind to placeholders in the SQL string.
- //
- // A parameter placeholder consists of the `@` character followed by the
- // parameter name (for example, `@firstName`). Parameter names can contain
- // letters, numbers, and underscores.
- //
- // Parameters can appear anywhere that a literal value is expected. The same
- // parameter name can be used more than once, for example:
- //
- // `"WHERE id > @msg_id AND id < @msg_id + 100"`
- //
- // It is an error to execute a SQL statement with unbound parameters.
- Params *structpb.Struct `protobuf:"bytes,4,opt,name=params,proto3" json:"params,omitempty"`
- // It is not always possible for Cloud Spanner to infer the right SQL type
- // from a JSON value. For example, values of type `BYTES` and values
- // of type `STRING` both appear in
- // [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings.
- //
- // In these cases, `param_types` can be used to specify the exact
- // SQL type for some or all of the SQL query parameters. See the
- // definition of [Type][google.spanner.v1.Type] for more information
- // about SQL types.
- ParamTypes map[string]*Type `protobuf:"bytes,5,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // Additional options that affect how many partitions are created.
- PartitionOptions *PartitionOptions `protobuf:"bytes,6,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
-}
-
-func (x *PartitionQueryRequest) Reset() {
- *x = PartitionQueryRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartitionQueryRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartitionQueryRequest) ProtoMessage() {}
-
-func (x *PartitionQueryRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartitionQueryRequest.ProtoReflect.Descriptor instead.
-func (*PartitionQueryRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *PartitionQueryRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *PartitionQueryRequest) GetTransaction() *TransactionSelector {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-func (x *PartitionQueryRequest) GetSql() string {
- if x != nil {
- return x.Sql
- }
- return ""
-}
-
-func (x *PartitionQueryRequest) GetParams() *structpb.Struct {
- if x != nil {
- return x.Params
- }
- return nil
-}
-
-func (x *PartitionQueryRequest) GetParamTypes() map[string]*Type {
- if x != nil {
- return x.ParamTypes
- }
- return nil
-}
-
-func (x *PartitionQueryRequest) GetPartitionOptions() *PartitionOptions {
- if x != nil {
- return x.PartitionOptions
- }
- return nil
-}
-
-// The request for [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
-type PartitionReadRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session used to create the partitions.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Read only snapshot transactions are supported, read/write and single use
- // transactions are not.
- Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
- // Required. The name of the table in the database to be read.
- Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
- // If non-empty, the name of an index on
- // [table][google.spanner.v1.PartitionReadRequest.table]. This index is used
- // instead of the table primary key when interpreting
- // [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting
- // result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set]
- // for further information.
- Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
- // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be
- // returned for each row matching this request.
- Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
- // Required. `key_set` identifies the rows to be yielded. `key_set` names the
- // primary keys of the rows in
- // [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless
- // [index][google.spanner.v1.PartitionReadRequest.index] is present. If
- // [index][google.spanner.v1.PartitionReadRequest.index] is present, then
- // [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names
- // index keys in [index][google.spanner.v1.PartitionReadRequest.index].
- //
- // It is not an error for the `key_set` to name rows that do not
- // exist in the database. Read yields nothing for nonexistent rows.
- KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
- // Additional options that affect how many partitions are created.
- PartitionOptions *PartitionOptions `protobuf:"bytes,9,opt,name=partition_options,json=partitionOptions,proto3" json:"partition_options,omitempty"`
-}
-
-func (x *PartitionReadRequest) Reset() {
- *x = PartitionReadRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartitionReadRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartitionReadRequest) ProtoMessage() {}
-
-func (x *PartitionReadRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartitionReadRequest.ProtoReflect.Descriptor instead.
-func (*PartitionReadRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *PartitionReadRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *PartitionReadRequest) GetTransaction() *TransactionSelector {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-func (x *PartitionReadRequest) GetTable() string {
- if x != nil {
- return x.Table
- }
- return ""
-}
-
-func (x *PartitionReadRequest) GetIndex() string {
- if x != nil {
- return x.Index
- }
- return ""
-}
-
-func (x *PartitionReadRequest) GetColumns() []string {
- if x != nil {
- return x.Columns
- }
- return nil
-}
-
-func (x *PartitionReadRequest) GetKeySet() *KeySet {
- if x != nil {
- return x.KeySet
- }
- return nil
-}
-
-func (x *PartitionReadRequest) GetPartitionOptions() *PartitionOptions {
- if x != nil {
- return x.PartitionOptions
- }
- return nil
-}
-
-// Information returned for each partition returned in a
-// PartitionResponse.
-type Partition struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // This token can be passed to Read, StreamingRead, ExecuteSql, or
- // ExecuteStreamingSql requests to restrict the results to those identified by
- // this partition token.
- PartitionToken []byte `protobuf:"bytes,1,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
-}
-
-func (x *Partition) Reset() {
- *x = Partition{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Partition) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Partition) ProtoMessage() {}
-
-func (x *Partition) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Partition.ProtoReflect.Descriptor instead.
-func (*Partition) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *Partition) GetPartitionToken() []byte {
- if x != nil {
- return x.PartitionToken
- }
- return nil
-}
-
-// The response for [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
-// or [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
-type PartitionResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Partitions created by this request.
- Partitions []*Partition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty"`
- // Transaction created by this request.
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
-}
-
-func (x *PartitionResponse) Reset() {
- *x = PartitionResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartitionResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartitionResponse) ProtoMessage() {}
-
-func (x *PartitionResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartitionResponse.ProtoReflect.Descriptor instead.
-func (*PartitionResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *PartitionResponse) GetPartitions() []*Partition {
- if x != nil {
- return x.Partitions
- }
- return nil
-}
-
-func (x *PartitionResponse) GetTransaction() *Transaction {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-// The request for [Read][google.spanner.v1.Spanner.Read] and
-// [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
-type ReadRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the read should be performed.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // The transaction to use. If none is provided, the default is a
- // temporary read-only transaction with strong concurrency.
- Transaction *TransactionSelector `protobuf:"bytes,2,opt,name=transaction,proto3" json:"transaction,omitempty"`
- // Required. The name of the table in the database to be read.
- Table string `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
- // If non-empty, the name of an index on
- // [table][google.spanner.v1.ReadRequest.table]. This index is used instead of
- // the table primary key when interpreting
- // [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows.
- // See [key_set][google.spanner.v1.ReadRequest.key_set] for further
- // information.
- Index string `protobuf:"bytes,4,opt,name=index,proto3" json:"index,omitempty"`
- // Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be
- // returned for each row matching this request.
- Columns []string `protobuf:"bytes,5,rep,name=columns,proto3" json:"columns,omitempty"`
- // Required. `key_set` identifies the rows to be yielded. `key_set` names the
- // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to
- // be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present.
- // If [index][google.spanner.v1.ReadRequest.index] is present, then
- // [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys
- // in [index][google.spanner.v1.ReadRequest.index].
- //
- // If the [partition_token][google.spanner.v1.ReadRequest.partition_token]
- // field is empty, rows are yielded in table primary key order (if
- // [index][google.spanner.v1.ReadRequest.index] is empty) or index key order
- // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the
- // [partition_token][google.spanner.v1.ReadRequest.partition_token] field is
- // not empty, rows will be yielded in an unspecified order.
- //
- // It is not an error for the `key_set` to name rows that do not
- // exist in the database. Read yields nothing for nonexistent rows.
- KeySet *KeySet `protobuf:"bytes,6,opt,name=key_set,json=keySet,proto3" json:"key_set,omitempty"`
- // If greater than zero, only the first `limit` rows are yielded. If `limit`
- // is zero, the default is no limit. A limit cannot be specified if
- // `partition_token` is set.
- Limit int64 `protobuf:"varint,8,opt,name=limit,proto3" json:"limit,omitempty"`
- // If this request is resuming a previously interrupted read,
- // `resume_token` should be copied from the last
- // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the
- // interruption. Doing this enables the new read to resume where the last read
- // left off. The rest of the request parameters must exactly match the request
- // that yielded this token.
- ResumeToken []byte `protobuf:"bytes,9,opt,name=resume_token,json=resumeToken,proto3" json:"resume_token,omitempty"`
- // If present, results will be restricted to the specified partition
- // previously created using PartitionRead(). There must be an exact
- // match for the values of fields common to this message and the
- // PartitionReadRequest message used to create this partition_token.
- PartitionToken []byte `protobuf:"bytes,10,opt,name=partition_token,json=partitionToken,proto3" json:"partition_token,omitempty"`
- // Common options for this request.
- RequestOptions *RequestOptions `protobuf:"bytes,11,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
- // Directed read options for this request.
- DirectedReadOptions *DirectedReadOptions `protobuf:"bytes,14,opt,name=directed_read_options,json=directedReadOptions,proto3" json:"directed_read_options,omitempty"`
- // If this is for a partitioned read and this field is set to `true`, the
- // request is executed with Spanner Data Boost independent compute resources.
- //
- // If the field is set to `true` but the request does not set
- // `partition_token`, the API returns an `INVALID_ARGUMENT` error.
- DataBoostEnabled bool `protobuf:"varint,15,opt,name=data_boost_enabled,json=dataBoostEnabled,proto3" json:"data_boost_enabled,omitempty"`
- // Optional. Order for the returned rows.
- //
- // By default, Spanner will return result rows in primary key order except for
- // PartitionRead requests. For applications that do not require rows to be
- // returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting
- // `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval,
- // resulting in lower latencies in certain cases (e.g. bulk point lookups).
- OrderBy ReadRequest_OrderBy `protobuf:"varint,16,opt,name=order_by,json=orderBy,proto3,enum=google.spanner.v1.ReadRequest_OrderBy" json:"order_by,omitempty"`
- // Optional. Lock Hint for the request, it can only be used with read-write
- // transactions.
- LockHint ReadRequest_LockHint `protobuf:"varint,17,opt,name=lock_hint,json=lockHint,proto3,enum=google.spanner.v1.ReadRequest_LockHint" json:"lock_hint,omitempty"`
-}
-
-func (x *ReadRequest) Reset() {
- *x = ReadRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadRequest) ProtoMessage() {}
-
-func (x *ReadRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadRequest.ProtoReflect.Descriptor instead.
-func (*ReadRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *ReadRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *ReadRequest) GetTransaction() *TransactionSelector {
- if x != nil {
- return x.Transaction
- }
- return nil
-}
-
-func (x *ReadRequest) GetTable() string {
- if x != nil {
- return x.Table
- }
- return ""
-}
-
-func (x *ReadRequest) GetIndex() string {
- if x != nil {
- return x.Index
- }
- return ""
-}
-
-func (x *ReadRequest) GetColumns() []string {
- if x != nil {
- return x.Columns
- }
- return nil
-}
-
-func (x *ReadRequest) GetKeySet() *KeySet {
- if x != nil {
- return x.KeySet
- }
- return nil
-}
-
-func (x *ReadRequest) GetLimit() int64 {
- if x != nil {
- return x.Limit
- }
- return 0
-}
-
-func (x *ReadRequest) GetResumeToken() []byte {
- if x != nil {
- return x.ResumeToken
- }
- return nil
-}
-
-func (x *ReadRequest) GetPartitionToken() []byte {
- if x != nil {
- return x.PartitionToken
- }
- return nil
-}
-
-func (x *ReadRequest) GetRequestOptions() *RequestOptions {
- if x != nil {
- return x.RequestOptions
- }
- return nil
-}
-
-func (x *ReadRequest) GetDirectedReadOptions() *DirectedReadOptions {
- if x != nil {
- return x.DirectedReadOptions
- }
- return nil
-}
-
-func (x *ReadRequest) GetDataBoostEnabled() bool {
- if x != nil {
- return x.DataBoostEnabled
- }
- return false
-}
-
-func (x *ReadRequest) GetOrderBy() ReadRequest_OrderBy {
- if x != nil {
- return x.OrderBy
- }
- return ReadRequest_ORDER_BY_UNSPECIFIED
-}
-
-func (x *ReadRequest) GetLockHint() ReadRequest_LockHint {
- if x != nil {
- return x.LockHint
- }
- return ReadRequest_LOCK_HINT_UNSPECIFIED
-}
-
-// The request for
-// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
-type BeginTransactionRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the transaction runs.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Required. Options for the new transaction.
- Options *TransactionOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
- // Common options for this request.
- // Priority is ignored for this request. Setting the priority in this
- // request_options struct will not do anything. To set the priority for a
- // transaction, set it on the reads and writes that are part of this
- // transaction instead.
- RequestOptions *RequestOptions `protobuf:"bytes,3,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
-}
-
-func (x *BeginTransactionRequest) Reset() {
- *x = BeginTransactionRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BeginTransactionRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BeginTransactionRequest) ProtoMessage() {}
-
-func (x *BeginTransactionRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BeginTransactionRequest.ProtoReflect.Descriptor instead.
-func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *BeginTransactionRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *BeginTransactionRequest) GetOptions() *TransactionOptions {
- if x != nil {
- return x.Options
- }
- return nil
-}
-
-func (x *BeginTransactionRequest) GetRequestOptions() *RequestOptions {
- if x != nil {
- return x.RequestOptions
- }
- return nil
-}
-
-// The request for [Commit][google.spanner.v1.Spanner.Commit].
-type CommitRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the transaction to be committed is running.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Required. The transaction in which to commit.
- //
- // Types that are assignable to Transaction:
- //
- // *CommitRequest_TransactionId
- // *CommitRequest_SingleUseTransaction
- Transaction isCommitRequest_Transaction `protobuf_oneof:"transaction"`
- // The mutations to be executed when this transaction commits. All
- // mutations are applied atomically, in the order they appear in
- // this list.
- Mutations []*Mutation `protobuf:"bytes,4,rep,name=mutations,proto3" json:"mutations,omitempty"`
- // If `true`, then statistics related to the transaction will be included in
- // the [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
- // Default value is `false`.
- ReturnCommitStats bool `protobuf:"varint,5,opt,name=return_commit_stats,json=returnCommitStats,proto3" json:"return_commit_stats,omitempty"`
- // Optional. The amount of latency this request is willing to incur in order
- // to improve throughput. If this field is not set, Spanner assumes requests
- // are relatively latency sensitive and automatically determines an
- // appropriate delay time. You can specify a batching delay value between 0
- // and 500 ms.
- MaxCommitDelay *durationpb.Duration `protobuf:"bytes,8,opt,name=max_commit_delay,json=maxCommitDelay,proto3" json:"max_commit_delay,omitempty"`
- // Common options for this request.
- RequestOptions *RequestOptions `protobuf:"bytes,6,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
-}
-
-func (x *CommitRequest) Reset() {
- *x = CommitRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CommitRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CommitRequest) ProtoMessage() {}
-
-func (x *CommitRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CommitRequest.ProtoReflect.Descriptor instead.
-func (*CommitRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *CommitRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (m *CommitRequest) GetTransaction() isCommitRequest_Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (x *CommitRequest) GetTransactionId() []byte {
- if x, ok := x.GetTransaction().(*CommitRequest_TransactionId); ok {
- return x.TransactionId
- }
- return nil
-}
-
-func (x *CommitRequest) GetSingleUseTransaction() *TransactionOptions {
- if x, ok := x.GetTransaction().(*CommitRequest_SingleUseTransaction); ok {
- return x.SingleUseTransaction
- }
- return nil
-}
-
-func (x *CommitRequest) GetMutations() []*Mutation {
- if x != nil {
- return x.Mutations
- }
- return nil
-}
-
-func (x *CommitRequest) GetReturnCommitStats() bool {
- if x != nil {
- return x.ReturnCommitStats
- }
- return false
-}
-
-func (x *CommitRequest) GetMaxCommitDelay() *durationpb.Duration {
- if x != nil {
- return x.MaxCommitDelay
- }
- return nil
-}
-
-func (x *CommitRequest) GetRequestOptions() *RequestOptions {
- if x != nil {
- return x.RequestOptions
- }
- return nil
-}
-
-type isCommitRequest_Transaction interface {
- isCommitRequest_Transaction()
-}
-
-type CommitRequest_TransactionId struct {
- // Commit a previously-started transaction.
- TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3,oneof"`
-}
-
-type CommitRequest_SingleUseTransaction struct {
- // Execute mutations in a temporary transaction. Note that unlike
- // commit of a previously-started transaction, commit with a
- // temporary transaction is non-idempotent. That is, if the
- // `CommitRequest` is sent to Cloud Spanner more than once (for
- // instance, due to retries in the application, or in the
- // transport library), it is possible that the mutations are
- // executed more than once. If this is undesirable, use
- // [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction] and
- // [Commit][google.spanner.v1.Spanner.Commit] instead.
- SingleUseTransaction *TransactionOptions `protobuf:"bytes,3,opt,name=single_use_transaction,json=singleUseTransaction,proto3,oneof"`
-}
-
-func (*CommitRequest_TransactionId) isCommitRequest_Transaction() {}
-
-func (*CommitRequest_SingleUseTransaction) isCommitRequest_Transaction() {}
-
-// The request for [Rollback][google.spanner.v1.Spanner.Rollback].
-type RollbackRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the transaction to roll back is running.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Required. The transaction to roll back.
- TransactionId []byte `protobuf:"bytes,2,opt,name=transaction_id,json=transactionId,proto3" json:"transaction_id,omitempty"`
-}
-
-func (x *RollbackRequest) Reset() {
- *x = RollbackRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RollbackRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RollbackRequest) ProtoMessage() {}
-
-func (x *RollbackRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead.
-func (*RollbackRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{21}
-}
-
-func (x *RollbackRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *RollbackRequest) GetTransactionId() []byte {
- if x != nil {
- return x.TransactionId
- }
- return nil
-}
-
-// The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite].
-type BatchWriteRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The session in which the batch request is to be run.
- Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
- // Common options for this request.
- RequestOptions *RequestOptions `protobuf:"bytes,3,opt,name=request_options,json=requestOptions,proto3" json:"request_options,omitempty"`
- // Required. The groups of mutations to be applied.
- MutationGroups []*BatchWriteRequest_MutationGroup `protobuf:"bytes,4,rep,name=mutation_groups,json=mutationGroups,proto3" json:"mutation_groups,omitempty"`
- // Optional. When `exclude_txn_from_change_streams` is set to `true`:
- // - Mutations from all transactions in this batch write operation will not
- // be recorded in change streams with DDL option `allow_txn_exclusion=true`
- // that are tracking columns modified by these transactions.
- // - Mutations from all transactions in this batch write operation will be
- // recorded in change streams with DDL option `allow_txn_exclusion=false or
- // not set` that are tracking columns modified by these transactions.
- //
- // When `exclude_txn_from_change_streams` is set to `false` or not set,
- // mutations from all transactions in this batch write operation will be
- // recorded in all change streams that are tracking columns modified by these
- // transactions.
- ExcludeTxnFromChangeStreams bool `protobuf:"varint,5,opt,name=exclude_txn_from_change_streams,json=excludeTxnFromChangeStreams,proto3" json:"exclude_txn_from_change_streams,omitempty"`
-}
-
-func (x *BatchWriteRequest) Reset() {
- *x = BatchWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BatchWriteRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BatchWriteRequest) ProtoMessage() {}
-
-func (x *BatchWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BatchWriteRequest.ProtoReflect.Descriptor instead.
-func (*BatchWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{22}
-}
-
-func (x *BatchWriteRequest) GetSession() string {
- if x != nil {
- return x.Session
- }
- return ""
-}
-
-func (x *BatchWriteRequest) GetRequestOptions() *RequestOptions {
- if x != nil {
- return x.RequestOptions
- }
- return nil
-}
-
-func (x *BatchWriteRequest) GetMutationGroups() []*BatchWriteRequest_MutationGroup {
- if x != nil {
- return x.MutationGroups
- }
- return nil
-}
-
-func (x *BatchWriteRequest) GetExcludeTxnFromChangeStreams() bool {
- if x != nil {
- return x.ExcludeTxnFromChangeStreams
- }
- return false
-}
-
-// The result of applying a batch of mutations.
-type BatchWriteResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The mutation groups applied in this batch. The values index into the
- // `mutation_groups` field in the corresponding `BatchWriteRequest`.
- Indexes []int32 `protobuf:"varint,1,rep,packed,name=indexes,proto3" json:"indexes,omitempty"`
- // An `OK` status indicates success. Any other status indicates a failure.
- Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
- // The commit timestamp of the transaction that applied this batch.
- // Present if `status` is `OK`, absent otherwise.
- CommitTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=commit_timestamp,json=commitTimestamp,proto3" json:"commit_timestamp,omitempty"`
-}
-
-func (x *BatchWriteResponse) Reset() {
- *x = BatchWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BatchWriteResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BatchWriteResponse) ProtoMessage() {}
-
-func (x *BatchWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BatchWriteResponse.ProtoReflect.Descriptor instead.
-func (*BatchWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{23}
-}
-
-func (x *BatchWriteResponse) GetIndexes() []int32 {
- if x != nil {
- return x.Indexes
- }
- return nil
-}
-
-func (x *BatchWriteResponse) GetStatus() *status.Status {
- if x != nil {
- return x.Status
- }
- return nil
-}
-
-func (x *BatchWriteResponse) GetCommitTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.CommitTimestamp
- }
- return nil
-}
-
-// The directed read replica selector.
-// Callers must provide one or more of the following fields for replica
-// selection:
-//
-// - `location` - The location must be one of the regions within the
-// multi-region configuration of your database.
-// - `type` - The type of the replica.
-//
-// Some examples of using replica_selectors are:
-//
-// - `location:us-east1` --> The "us-east1" replica(s) of any available type
-// will be used to process the request.
-// - `type:READ_ONLY` --> The "READ_ONLY" type replica(s) in nearest
-// available location will be used to process the
-// request.
-// - `location:us-east1 type:READ_ONLY` --> The "READ_ONLY" type replica(s)
-// in location "us-east1" will be used to process
-// the request.
-type DirectedReadOptions_ReplicaSelection struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The location or region of the serving requests, e.g. "us-east1".
- Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
- // The type of replica.
- Type DirectedReadOptions_ReplicaSelection_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.spanner.v1.DirectedReadOptions_ReplicaSelection_Type" json:"type,omitempty"`
-}
-
-func (x *DirectedReadOptions_ReplicaSelection) Reset() {
- *x = DirectedReadOptions_ReplicaSelection{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DirectedReadOptions_ReplicaSelection) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DirectedReadOptions_ReplicaSelection) ProtoMessage() {}
-
-func (x *DirectedReadOptions_ReplicaSelection) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DirectedReadOptions_ReplicaSelection.ProtoReflect.Descriptor instead.
-func (*DirectedReadOptions_ReplicaSelection) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *DirectedReadOptions_ReplicaSelection) GetLocation() string {
- if x != nil {
- return x.Location
- }
- return ""
-}
-
-func (x *DirectedReadOptions_ReplicaSelection) GetType() DirectedReadOptions_ReplicaSelection_Type {
- if x != nil {
- return x.Type
- }
- return DirectedReadOptions_ReplicaSelection_TYPE_UNSPECIFIED
-}
-
-// An IncludeReplicas contains a repeated set of ReplicaSelection which
-// indicates the order in which replicas should be considered.
-type DirectedReadOptions_IncludeReplicas struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The directed read replica selector.
- ReplicaSelections []*DirectedReadOptions_ReplicaSelection `protobuf:"bytes,1,rep,name=replica_selections,json=replicaSelections,proto3" json:"replica_selections,omitempty"`
- // If true, Spanner will not route requests to a replica outside the
- // include_replicas list when all of the specified replicas are unavailable
- // or unhealthy. Default value is `false`.
- AutoFailoverDisabled bool `protobuf:"varint,2,opt,name=auto_failover_disabled,json=autoFailoverDisabled,proto3" json:"auto_failover_disabled,omitempty"`
-}
-
-func (x *DirectedReadOptions_IncludeReplicas) Reset() {
- *x = DirectedReadOptions_IncludeReplicas{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DirectedReadOptions_IncludeReplicas) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DirectedReadOptions_IncludeReplicas) ProtoMessage() {}
-
-func (x *DirectedReadOptions_IncludeReplicas) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DirectedReadOptions_IncludeReplicas.ProtoReflect.Descriptor instead.
-func (*DirectedReadOptions_IncludeReplicas) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{9, 1}
-}
-
-func (x *DirectedReadOptions_IncludeReplicas) GetReplicaSelections() []*DirectedReadOptions_ReplicaSelection {
- if x != nil {
- return x.ReplicaSelections
- }
- return nil
-}
-
-func (x *DirectedReadOptions_IncludeReplicas) GetAutoFailoverDisabled() bool {
- if x != nil {
- return x.AutoFailoverDisabled
- }
- return false
-}
-
-// An ExcludeReplicas contains a repeated set of ReplicaSelection that should
-// be excluded from serving requests.
-type DirectedReadOptions_ExcludeReplicas struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The directed read replica selector.
- ReplicaSelections []*DirectedReadOptions_ReplicaSelection `protobuf:"bytes,1,rep,name=replica_selections,json=replicaSelections,proto3" json:"replica_selections,omitempty"`
-}
-
-func (x *DirectedReadOptions_ExcludeReplicas) Reset() {
- *x = DirectedReadOptions_ExcludeReplicas{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DirectedReadOptions_ExcludeReplicas) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DirectedReadOptions_ExcludeReplicas) ProtoMessage() {}
-
-func (x *DirectedReadOptions_ExcludeReplicas) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DirectedReadOptions_ExcludeReplicas.ProtoReflect.Descriptor instead.
-func (*DirectedReadOptions_ExcludeReplicas) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{9, 2}
-}
-
-func (x *DirectedReadOptions_ExcludeReplicas) GetReplicaSelections() []*DirectedReadOptions_ReplicaSelection {
- if x != nil {
- return x.ReplicaSelections
- }
- return nil
-}
-
-// Query optimizer configuration.
-type ExecuteSqlRequest_QueryOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // An option to control the selection of optimizer version.
- //
- // This parameter allows individual queries to pick different query
- // optimizer versions.
- //
- // Specifying `latest` as a value instructs Cloud Spanner to use the
- // latest supported query optimizer version. If not specified, Cloud Spanner
- // uses the optimizer version set at the database level options. Any other
- // positive integer (from the list of supported optimizer versions)
- // overrides the default optimizer version for query execution.
- //
- // The list of supported optimizer versions can be queried from
- // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS.
- //
- // Executing a SQL statement with an invalid optimizer version fails with
- // an `INVALID_ARGUMENT` error.
- //
- // See
- // https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
- // for more information on managing the query optimizer.
- //
- // The `optimizer_version` statement hint has precedence over this setting.
- OptimizerVersion string `protobuf:"bytes,1,opt,name=optimizer_version,json=optimizerVersion,proto3" json:"optimizer_version,omitempty"`
- // An option to control the selection of optimizer statistics package.
- //
- // This parameter allows individual queries to use a different query
- // optimizer statistics package.
- //
- // Specifying `latest` as a value instructs Cloud Spanner to use the latest
- // generated statistics package. If not specified, Cloud Spanner uses
- // the statistics package set at the database level options, or the latest
- // package if the database option is not set.
- //
- // The statistics package requested by the query has to be exempt from
- // garbage collection. This can be achieved with the following DDL
- // statement:
- //
- // ```
- // ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
- // ```
- //
- // The list of available statistics packages can be queried from
- // `INFORMATION_SCHEMA.SPANNER_STATISTICS`.
- //
- // Executing a SQL statement with an invalid optimizer statistics package
- // or with a statistics package that allows garbage collection fails with
- // an `INVALID_ARGUMENT` error.
- OptimizerStatisticsPackage string `protobuf:"bytes,2,opt,name=optimizer_statistics_package,json=optimizerStatisticsPackage,proto3" json:"optimizer_statistics_package,omitempty"`
-}
-
-func (x *ExecuteSqlRequest_QueryOptions) Reset() {
- *x = ExecuteSqlRequest_QueryOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ExecuteSqlRequest_QueryOptions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ExecuteSqlRequest_QueryOptions) ProtoMessage() {}
-
-func (x *ExecuteSqlRequest_QueryOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ExecuteSqlRequest_QueryOptions.ProtoReflect.Descriptor instead.
-func (*ExecuteSqlRequest_QueryOptions) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{10, 0}
-}
-
-func (x *ExecuteSqlRequest_QueryOptions) GetOptimizerVersion() string {
- if x != nil {
- return x.OptimizerVersion
- }
- return ""
-}
-
-func (x *ExecuteSqlRequest_QueryOptions) GetOptimizerStatisticsPackage() string {
- if x != nil {
- return x.OptimizerStatisticsPackage
- }
- return ""
-}
-
-// A single DML statement.
-type ExecuteBatchDmlRequest_Statement struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The DML string.
- Sql string `protobuf:"bytes,1,opt,name=sql,proto3" json:"sql,omitempty"`
- // Parameter names and values that bind to placeholders in the DML string.
- //
- // A parameter placeholder consists of the `@` character followed by the
- // parameter name (for example, `@firstName`). Parameter names can contain
- // letters, numbers, and underscores.
- //
- // Parameters can appear anywhere that a literal value is expected. The
- // same parameter name can be used more than once, for example:
- //
- // `"WHERE id > @msg_id AND id < @msg_id + 100"`
- //
- // It is an error to execute a SQL statement with unbound parameters.
- Params *structpb.Struct `protobuf:"bytes,2,opt,name=params,proto3" json:"params,omitempty"`
- // It is not always possible for Cloud Spanner to infer the right SQL type
- // from a JSON value. For example, values of type `BYTES` and values
- // of type `STRING` both appear in
- // [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as
- // JSON strings.
- //
- // In these cases, `param_types` can be used to specify the exact
- // SQL type for some or all of the SQL statement parameters. See the
- // definition of [Type][google.spanner.v1.Type] for more information
- // about SQL types.
- ParamTypes map[string]*Type `protobuf:"bytes,3,rep,name=param_types,json=paramTypes,proto3" json:"param_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *ExecuteBatchDmlRequest_Statement) Reset() {
- *x = ExecuteBatchDmlRequest_Statement{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ExecuteBatchDmlRequest_Statement) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ExecuteBatchDmlRequest_Statement) ProtoMessage() {}
-
-func (x *ExecuteBatchDmlRequest_Statement) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ExecuteBatchDmlRequest_Statement.ProtoReflect.Descriptor instead.
-func (*ExecuteBatchDmlRequest_Statement) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{11, 0}
-}
-
-func (x *ExecuteBatchDmlRequest_Statement) GetSql() string {
- if x != nil {
- return x.Sql
- }
- return ""
-}
-
-func (x *ExecuteBatchDmlRequest_Statement) GetParams() *structpb.Struct {
- if x != nil {
- return x.Params
- }
- return nil
-}
-
-func (x *ExecuteBatchDmlRequest_Statement) GetParamTypes() map[string]*Type {
- if x != nil {
- return x.ParamTypes
- }
- return nil
-}
-
-// A group of mutations to be committed together. Related mutations should be
-// placed in a group. For example, two mutations inserting rows with the same
-// primary key prefix in both parent and child tables are related.
-type BatchWriteRequest_MutationGroup struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The mutations in this group.
- Mutations []*Mutation `protobuf:"bytes,1,rep,name=mutations,proto3" json:"mutations,omitempty"`
-}
-
-func (x *BatchWriteRequest_MutationGroup) Reset() {
- *x = BatchWriteRequest_MutationGroup{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BatchWriteRequest_MutationGroup) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BatchWriteRequest_MutationGroup) ProtoMessage() {}
-
-func (x *BatchWriteRequest_MutationGroup) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_spanner_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BatchWriteRequest_MutationGroup.ProtoReflect.Descriptor instead.
-func (*BatchWriteRequest_MutationGroup) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_spanner_proto_rawDescGZIP(), []int{22, 0}
-}
-
-func (x *BatchWriteRequest_MutationGroup) GetMutations() []*Mutation {
- if x != nil {
- return x.Mutations
- }
- return nil
-}
-
-var File_google_spanner_v1_spanner_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_spanner_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x72,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
- 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
- 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73,
- 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
- 0x73, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x76,
- 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x96, 0x01, 0x0a,
- 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a,
- 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x1a, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x52,
- 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x10, 0x73, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x0f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65,
- 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x53, 0x0a, 0x1b, 0x42, 0x61,
- 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22,
- 0xfb, 0x03, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61,
- 0x62, 0x65, 0x6c, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x5a, 0x0a, 0x19, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x78,
- 0x69, 0x6d, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x61, 0x70, 0x70, 0x72,
- 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x6f,
- 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f,
- 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c,
- 0x65, 0x78, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
- 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x78, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b,
- 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x74, 0xea, 0x41, 0x71, 0x0a, 0x1e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f,
- 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x4f, 0x0a,
- 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae,
- 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21,
- 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70,
- 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
- 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65,
- 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22,
- 0x76, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x52, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x82, 0x02, 0x0a, 0x0e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46,
- 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x70, 0x72,
- 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x54, 0x61, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x67,
- 0x22, 0x5e, 0x0a, 0x08, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14,
- 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
- 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49,
- 0x54, 0x59, 0x5f, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x52, 0x49, 0x4f,
- 0x52, 0x49, 0x54, 0x59, 0x5f, 0x4d, 0x45, 0x44, 0x49, 0x55, 0x4d, 0x10, 0x02, 0x12, 0x11, 0x0a,
- 0x0d, 0x50, 0x52, 0x49, 0x4f, 0x52, 0x49, 0x54, 0x59, 0x5f, 0x48, 0x49, 0x47, 0x48, 0x10, 0x03,
- 0x22, 0xd8, 0x05, 0x0a, 0x13, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x61,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x63, 0x0a, 0x10, 0x69, 0x6e, 0x63, 0x6c,
- 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52,
- 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x63, 0x6c, 0x75,
- 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x69, 0x6e,
- 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x63, 0x0a,
- 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65,
- 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x45, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x48,
- 0x00, 0x52, 0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x73, 0x1a, 0xbd, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x65,
- 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65,
- 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a,
- 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x57, 0x52, 0x49, 0x54,
- 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
- 0x10, 0x02, 0x1a, 0xaf, 0x01, 0x0a, 0x0f, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x66, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63,
- 0x61, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52,
- 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x70,
- 0x6c, 0x69, 0x63, 0x61, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34,
- 0x0a, 0x16, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f,
- 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14,
- 0x61, 0x75, 0x74, 0x6f, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x44, 0x69, 0x73, 0x61,
- 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x79, 0x0a, 0x0f, 0x45, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x52,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x66, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64,
- 0x52, 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42,
- 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x22, 0xa4, 0x08, 0x0a, 0x11,
- 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a,
- 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x03, 0x73, 0x71, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c,
- 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
- 0x4d, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53,
- 0x71, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4d,
- 0x6f, 0x64, 0x65, 0x52, 0x09, 0x71, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x27,
- 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x65, 0x71, 0x6e, 0x6f,
- 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x65, 0x71, 0x6e, 0x6f, 0x12, 0x56, 0x0a,
- 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
- 0x53, 0x71, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4a, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x5a, 0x0a, 0x15, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65,
- 0x61, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x61,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
- 0x65, 0x64, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2c, 0x0a,
- 0x12, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x62, 0x6f, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x64, 0x61, 0x74, 0x61, 0x42,
- 0x6f, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x7d, 0x0a, 0x0c, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x6f,
- 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65,
- 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x1c, 0x6f, 0x70, 0x74, 0x69,
- 0x6d, 0x69, 0x7a, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73,
- 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a,
- 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
- 0x69, 0x63, 0x73, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x1a, 0x56, 0x0a, 0x0f, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
- 0x38, 0x01, 0x22, 0x2e, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x12,
- 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x50,
- 0x4c, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45,
- 0x10, 0x02, 0x22, 0xfe, 0x04, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61,
- 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a,
- 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26,
- 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x4d, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x58,
- 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61,
- 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x05, 0x73, 0x65, 0x71, 0x6e,
- 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x73, 0x65,
- 0x71, 0x6e, 0x6f, 0x12, 0x4a, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
- 0x91, 0x02, 0x0a, 0x09, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x15, 0x0a,
- 0x03, 0x73, 0x71, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x03, 0x73, 0x71, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x64, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
- 0x50, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x56, 0x0a, 0x0f, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
- 0x02, 0x38, 0x01, 0x22, 0x84, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42,
- 0x61, 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x3d, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53,
- 0x65, 0x74, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x73, 0x12, 0x2a,
- 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x6b, 0x0a, 0x10, 0x50, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30,
- 0x0a, 0x14, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x7a, 0x65,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x70, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73,
- 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xf0, 0x03, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a,
- 0x03, 0x73, 0x71, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x03, 0x73, 0x71, 0x6c, 0x12, 0x2f, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x59, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73,
- 0x12, 0x50, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x1a, 0x56, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x02, 0x0a, 0x14, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74,
- 0x6f, 0x72, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x19, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e,
- 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78,
- 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x07, 0x6b, 0x65,
- 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x4b, 0x65, 0x79, 0x53, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6b, 0x65, 0x79,
- 0x53, 0x65, 0x74, 0x12, 0x50, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x52, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x34, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x11,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x40, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x22, 0x99, 0x07, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72,
- 0x52, 0x0b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a,
- 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65,
- 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d,
- 0x0a, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x37, 0x0a,
- 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
- 0x6b, 0x65, 0x79, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x21, 0x0a, 0x0c,
- 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
- 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4a, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x15, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64,
- 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0e, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64,
- 0x52, 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x64, 0x69, 0x72,
- 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x2c, 0x0a, 0x12, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x62, 0x6f, 0x6f, 0x73, 0x74, 0x5f, 0x65,
- 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x64, 0x61,
- 0x74, 0x61, 0x42, 0x6f, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x46,
- 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x2e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f,
- 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x49, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68,
- 0x69, 0x6e, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
- 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x48, 0x69,
- 0x6e, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x69, 0x6e,
- 0x74, 0x22, 0x54, 0x0a, 0x07, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x18, 0x0a, 0x14,
- 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f, 0x42, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
- 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f,
- 0x42, 0x59, 0x5f, 0x50, 0x52, 0x49, 0x4d, 0x41, 0x52, 0x59, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x01,
- 0x12, 0x15, 0x0a, 0x11, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x5f, 0x42, 0x59, 0x5f, 0x4e, 0x4f, 0x5f,
- 0x4f, 0x52, 0x44, 0x45, 0x52, 0x10, 0x02, 0x22, 0x54, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x6b, 0x48,
- 0x69, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x49, 0x4e, 0x54,
- 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14,
- 0x0a, 0x10, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x49, 0x4e, 0x54, 0x5f, 0x53, 0x48, 0x41, 0x52,
- 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x48, 0x49, 0x4e,
- 0x54, 0x5f, 0x45, 0x58, 0x43, 0x4c, 0x55, 0x53, 0x49, 0x56, 0x45, 0x10, 0x02, 0x22, 0xed, 0x01,
- 0x0a, 0x17, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x12, 0x4a, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0e, 0x72,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe9, 0x03,
- 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x27, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0d, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x5d, 0x0a, 0x16, 0x73, 0x69,
- 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x48, 0x00, 0x52, 0x14, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x55, 0x73, 0x65, 0x54, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x09, 0x6d, 0x75, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x11, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53,
- 0x74, 0x61, 0x74, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6d, 0x6d,
- 0x69, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0e,
- 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x4a,
- 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x74, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7f, 0x0a, 0x0f, 0x52, 0x6f, 0x6c,
- 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x07,
- 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a,
- 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x74, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x9f, 0x03, 0x0a, 0x11, 0x42,
- 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x40, 0x0a, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0e,
- 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x60,
- 0x0a, 0x0f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70,
- 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63,
- 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4d, 0x75,
- 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0e, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73,
- 0x12, 0x49, 0x0a, 0x1f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x78, 0x6e, 0x5f,
- 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x1b,
- 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x78, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x68,
- 0x61, 0x6e, 0x67, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0x4f, 0x0a, 0x0d, 0x4d,
- 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x3e, 0x0a, 0x09,
- 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa1, 0x01, 0x0a,
- 0x12, 0x42, 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x05, 0x52, 0x07, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x12, 0x2a, 0x0a,
- 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x45, 0x0a, 0x10, 0x63, 0x6f, 0x6d,
- 0x6d, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x0f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x32, 0x8b, 0x18, 0x0a, 0x07, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x12, 0xa6, 0x01, 0x0a,
- 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x22, 0x50, 0xda, 0x41, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3f, 0x3a, 0x01, 0x2a, 0x22, 0x3a, 0x2f, 0x76, 0x31, 0x2f, 0x7b,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0xe0, 0x01, 0x0a, 0x13, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6a, 0xda, 0x41,
- 0x16, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x2c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
- 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4b, 0x3a, 0x01, 0x2a,
- 0x22, 0x46, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61,
- 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73,
- 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x62, 0x61, 0x74,
- 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74,
- 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x47, 0xda, 0x41, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e,
- 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69,
- 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62,
- 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
- 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4d, 0xda, 0x41, 0x08, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x12, 0x3a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61,
- 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82,
- 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x2a, 0x38, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x12,
- 0xa3, 0x01, 0x0a, 0x0a, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x12, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53,
- 0x65, 0x74, 0x22, 0x51, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4b, 0x3a, 0x01, 0x2a, 0x22, 0x46, 0x2f,
- 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
- 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x65, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x53, 0x71, 0x6c, 0x12, 0xbe, 0x01, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
- 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x71, 0x6c, 0x12, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x71, 0x6c, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74, 0x22, 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x54,
- 0x3a, 0x01, 0x2a, 0x22, 0x4f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
- 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d,
- 0x3a, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
- 0x67, 0x53, 0x71, 0x6c, 0x30, 0x01, 0x12, 0xc0, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
- 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x22, 0x56, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x50, 0x3a, 0x01, 0x2a, 0x22, 0x4b, 0x2f, 0x76,
- 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f,
- 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
- 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x6d, 0x6c, 0x12, 0x91, 0x01, 0x0a, 0x04, 0x52, 0x65,
- 0x61, 0x64, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x53, 0x65, 0x74,
- 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x3a, 0x01, 0x2a, 0x22, 0x40, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
- 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x72, 0x65, 0x61, 0x64, 0x12, 0xac, 0x01,
- 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x12,
- 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6c,
- 0x74, 0x53, 0x65, 0x74, 0x22, 0x54, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x3a, 0x01, 0x2a, 0x22,
- 0x49, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a,
- 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x74, 0x72,
- 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x61, 0x64, 0x30, 0x01, 0x12, 0xc9, 0x01, 0x0a,
- 0x10, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x69, 0xda,
- 0x41, 0x0f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x51, 0x3a, 0x01, 0x2a, 0x22, 0x4c, 0x2f, 0x76, 0x31, 0x2f,
- 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f,
- 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x54, 0x72, 0x61,
- 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xeb, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6d,
- 0x6d, 0x69, 0x74, 0x12, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x9b, 0x01, 0xda, 0x41, 0x20, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x69, 0x64, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41,
- 0x28, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f,
- 0x75, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2c,
- 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x3a,
- 0x01, 0x2a, 0x22, 0x42, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74,
- 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65,
- 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a,
- 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0xb0, 0x01, 0x0a, 0x08, 0x52, 0x6f, 0x6c, 0x6c, 0x62,
- 0x61, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
- 0x68, 0xda, 0x41, 0x16, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2c, 0x74, 0x72, 0x61, 0x6e,
- 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x49,
- 0x3a, 0x01, 0x2a, 0x22, 0x44, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f,
- 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73,
- 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73,
- 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d,
- 0x3a, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x12, 0xb7, 0x01, 0x0a, 0x0e, 0x50, 0x61,
- 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x28, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31,
- 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0x82, 0xd3,
- 0xe4, 0x93, 0x02, 0x4f, 0x3a, 0x01, 0x2a, 0x22, 0x4a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
- 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x51, 0x75,
- 0x65, 0x72, 0x79, 0x12, 0xb4, 0x01, 0x0a, 0x0d, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x61, 0x64, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4e, 0x3a, 0x01, 0x2a, 0x22,
- 0x49, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63,
- 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a,
- 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x61, 0x72,
- 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x12, 0xc8, 0x01, 0x0a, 0x0a, 0x42,
- 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61,
- 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6b, 0xda, 0x41, 0x17, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x2c, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x72, 0x6f, 0x75,
- 0x70, 0x73, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4b, 0x3a, 0x01, 0x2a, 0x22, 0x46, 0x2f, 0x76, 0x31,
- 0x2f, 0x7b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a,
- 0x2f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x62, 0x61, 0x74, 0x63, 0x68, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x30, 0x01, 0x1a, 0x77, 0xca, 0x41, 0x16, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0xd2, 0x41, 0x5b, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
- 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
- 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74,
- 0x68, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x42, 0x91,
- 0x02, 0xea, 0x41, 0x5f, 0x0a, 0x1f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x44, 0x61, 0x74,
- 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x3c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
- 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x7d, 0x2f, 0x64,
- 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61,
- 0x73, 0x65, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x0c, 0x53, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70,
- 0x62, 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x17, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a,
- 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a,
- 0x56, 0x31, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_spanner_proto_rawDescOnce sync.Once
- file_google_spanner_v1_spanner_proto_rawDescData = file_google_spanner_v1_spanner_proto_rawDesc
-)
-
-func file_google_spanner_v1_spanner_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_spanner_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_spanner_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_spanner_proto_rawDescData)
- })
- return file_google_spanner_v1_spanner_proto_rawDescData
-}
-
-var file_google_spanner_v1_spanner_proto_enumTypes = make([]protoimpl.EnumInfo, 5)
-var file_google_spanner_v1_spanner_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
-var file_google_spanner_v1_spanner_proto_goTypes = []any{
- (RequestOptions_Priority)(0), // 0: google.spanner.v1.RequestOptions.Priority
- (DirectedReadOptions_ReplicaSelection_Type)(0), // 1: google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type
- (ExecuteSqlRequest_QueryMode)(0), // 2: google.spanner.v1.ExecuteSqlRequest.QueryMode
- (ReadRequest_OrderBy)(0), // 3: google.spanner.v1.ReadRequest.OrderBy
- (ReadRequest_LockHint)(0), // 4: google.spanner.v1.ReadRequest.LockHint
- (*CreateSessionRequest)(nil), // 5: google.spanner.v1.CreateSessionRequest
- (*BatchCreateSessionsRequest)(nil), // 6: google.spanner.v1.BatchCreateSessionsRequest
- (*BatchCreateSessionsResponse)(nil), // 7: google.spanner.v1.BatchCreateSessionsResponse
- (*Session)(nil), // 8: google.spanner.v1.Session
- (*GetSessionRequest)(nil), // 9: google.spanner.v1.GetSessionRequest
- (*ListSessionsRequest)(nil), // 10: google.spanner.v1.ListSessionsRequest
- (*ListSessionsResponse)(nil), // 11: google.spanner.v1.ListSessionsResponse
- (*DeleteSessionRequest)(nil), // 12: google.spanner.v1.DeleteSessionRequest
- (*RequestOptions)(nil), // 13: google.spanner.v1.RequestOptions
- (*DirectedReadOptions)(nil), // 14: google.spanner.v1.DirectedReadOptions
- (*ExecuteSqlRequest)(nil), // 15: google.spanner.v1.ExecuteSqlRequest
- (*ExecuteBatchDmlRequest)(nil), // 16: google.spanner.v1.ExecuteBatchDmlRequest
- (*ExecuteBatchDmlResponse)(nil), // 17: google.spanner.v1.ExecuteBatchDmlResponse
- (*PartitionOptions)(nil), // 18: google.spanner.v1.PartitionOptions
- (*PartitionQueryRequest)(nil), // 19: google.spanner.v1.PartitionQueryRequest
- (*PartitionReadRequest)(nil), // 20: google.spanner.v1.PartitionReadRequest
- (*Partition)(nil), // 21: google.spanner.v1.Partition
- (*PartitionResponse)(nil), // 22: google.spanner.v1.PartitionResponse
- (*ReadRequest)(nil), // 23: google.spanner.v1.ReadRequest
- (*BeginTransactionRequest)(nil), // 24: google.spanner.v1.BeginTransactionRequest
- (*CommitRequest)(nil), // 25: google.spanner.v1.CommitRequest
- (*RollbackRequest)(nil), // 26: google.spanner.v1.RollbackRequest
- (*BatchWriteRequest)(nil), // 27: google.spanner.v1.BatchWriteRequest
- (*BatchWriteResponse)(nil), // 28: google.spanner.v1.BatchWriteResponse
- nil, // 29: google.spanner.v1.Session.LabelsEntry
- (*DirectedReadOptions_ReplicaSelection)(nil), // 30: google.spanner.v1.DirectedReadOptions.ReplicaSelection
- (*DirectedReadOptions_IncludeReplicas)(nil), // 31: google.spanner.v1.DirectedReadOptions.IncludeReplicas
- (*DirectedReadOptions_ExcludeReplicas)(nil), // 32: google.spanner.v1.DirectedReadOptions.ExcludeReplicas
- (*ExecuteSqlRequest_QueryOptions)(nil), // 33: google.spanner.v1.ExecuteSqlRequest.QueryOptions
- nil, // 34: google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry
- (*ExecuteBatchDmlRequest_Statement)(nil), // 35: google.spanner.v1.ExecuteBatchDmlRequest.Statement
- nil, // 36: google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry
- nil, // 37: google.spanner.v1.PartitionQueryRequest.ParamTypesEntry
- (*BatchWriteRequest_MutationGroup)(nil), // 38: google.spanner.v1.BatchWriteRequest.MutationGroup
- (*timestamppb.Timestamp)(nil), // 39: google.protobuf.Timestamp
- (*TransactionSelector)(nil), // 40: google.spanner.v1.TransactionSelector
- (*structpb.Struct)(nil), // 41: google.protobuf.Struct
- (*ResultSet)(nil), // 42: google.spanner.v1.ResultSet
- (*status.Status)(nil), // 43: google.rpc.Status
- (*KeySet)(nil), // 44: google.spanner.v1.KeySet
- (*Transaction)(nil), // 45: google.spanner.v1.Transaction
- (*TransactionOptions)(nil), // 46: google.spanner.v1.TransactionOptions
- (*Mutation)(nil), // 47: google.spanner.v1.Mutation
- (*durationpb.Duration)(nil), // 48: google.protobuf.Duration
- (*Type)(nil), // 49: google.spanner.v1.Type
- (*emptypb.Empty)(nil), // 50: google.protobuf.Empty
- (*PartialResultSet)(nil), // 51: google.spanner.v1.PartialResultSet
- (*CommitResponse)(nil), // 52: google.spanner.v1.CommitResponse
-}
-var file_google_spanner_v1_spanner_proto_depIdxs = []int32{
- 8, // 0: google.spanner.v1.CreateSessionRequest.session:type_name -> google.spanner.v1.Session
- 8, // 1: google.spanner.v1.BatchCreateSessionsRequest.session_template:type_name -> google.spanner.v1.Session
- 8, // 2: google.spanner.v1.BatchCreateSessionsResponse.session:type_name -> google.spanner.v1.Session
- 29, // 3: google.spanner.v1.Session.labels:type_name -> google.spanner.v1.Session.LabelsEntry
- 39, // 4: google.spanner.v1.Session.create_time:type_name -> google.protobuf.Timestamp
- 39, // 5: google.spanner.v1.Session.approximate_last_use_time:type_name -> google.protobuf.Timestamp
- 8, // 6: google.spanner.v1.ListSessionsResponse.sessions:type_name -> google.spanner.v1.Session
- 0, // 7: google.spanner.v1.RequestOptions.priority:type_name -> google.spanner.v1.RequestOptions.Priority
- 31, // 8: google.spanner.v1.DirectedReadOptions.include_replicas:type_name -> google.spanner.v1.DirectedReadOptions.IncludeReplicas
- 32, // 9: google.spanner.v1.DirectedReadOptions.exclude_replicas:type_name -> google.spanner.v1.DirectedReadOptions.ExcludeReplicas
- 40, // 10: google.spanner.v1.ExecuteSqlRequest.transaction:type_name -> google.spanner.v1.TransactionSelector
- 41, // 11: google.spanner.v1.ExecuteSqlRequest.params:type_name -> google.protobuf.Struct
- 34, // 12: google.spanner.v1.ExecuteSqlRequest.param_types:type_name -> google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry
- 2, // 13: google.spanner.v1.ExecuteSqlRequest.query_mode:type_name -> google.spanner.v1.ExecuteSqlRequest.QueryMode
- 33, // 14: google.spanner.v1.ExecuteSqlRequest.query_options:type_name -> google.spanner.v1.ExecuteSqlRequest.QueryOptions
- 13, // 15: google.spanner.v1.ExecuteSqlRequest.request_options:type_name -> google.spanner.v1.RequestOptions
- 14, // 16: google.spanner.v1.ExecuteSqlRequest.directed_read_options:type_name -> google.spanner.v1.DirectedReadOptions
- 40, // 17: google.spanner.v1.ExecuteBatchDmlRequest.transaction:type_name -> google.spanner.v1.TransactionSelector
- 35, // 18: google.spanner.v1.ExecuteBatchDmlRequest.statements:type_name -> google.spanner.v1.ExecuteBatchDmlRequest.Statement
- 13, // 19: google.spanner.v1.ExecuteBatchDmlRequest.request_options:type_name -> google.spanner.v1.RequestOptions
- 42, // 20: google.spanner.v1.ExecuteBatchDmlResponse.result_sets:type_name -> google.spanner.v1.ResultSet
- 43, // 21: google.spanner.v1.ExecuteBatchDmlResponse.status:type_name -> google.rpc.Status
- 40, // 22: google.spanner.v1.PartitionQueryRequest.transaction:type_name -> google.spanner.v1.TransactionSelector
- 41, // 23: google.spanner.v1.PartitionQueryRequest.params:type_name -> google.protobuf.Struct
- 37, // 24: google.spanner.v1.PartitionQueryRequest.param_types:type_name -> google.spanner.v1.PartitionQueryRequest.ParamTypesEntry
- 18, // 25: google.spanner.v1.PartitionQueryRequest.partition_options:type_name -> google.spanner.v1.PartitionOptions
- 40, // 26: google.spanner.v1.PartitionReadRequest.transaction:type_name -> google.spanner.v1.TransactionSelector
- 44, // 27: google.spanner.v1.PartitionReadRequest.key_set:type_name -> google.spanner.v1.KeySet
- 18, // 28: google.spanner.v1.PartitionReadRequest.partition_options:type_name -> google.spanner.v1.PartitionOptions
- 21, // 29: google.spanner.v1.PartitionResponse.partitions:type_name -> google.spanner.v1.Partition
- 45, // 30: google.spanner.v1.PartitionResponse.transaction:type_name -> google.spanner.v1.Transaction
- 40, // 31: google.spanner.v1.ReadRequest.transaction:type_name -> google.spanner.v1.TransactionSelector
- 44, // 32: google.spanner.v1.ReadRequest.key_set:type_name -> google.spanner.v1.KeySet
- 13, // 33: google.spanner.v1.ReadRequest.request_options:type_name -> google.spanner.v1.RequestOptions
- 14, // 34: google.spanner.v1.ReadRequest.directed_read_options:type_name -> google.spanner.v1.DirectedReadOptions
- 3, // 35: google.spanner.v1.ReadRequest.order_by:type_name -> google.spanner.v1.ReadRequest.OrderBy
- 4, // 36: google.spanner.v1.ReadRequest.lock_hint:type_name -> google.spanner.v1.ReadRequest.LockHint
- 46, // 37: google.spanner.v1.BeginTransactionRequest.options:type_name -> google.spanner.v1.TransactionOptions
- 13, // 38: google.spanner.v1.BeginTransactionRequest.request_options:type_name -> google.spanner.v1.RequestOptions
- 46, // 39: google.spanner.v1.CommitRequest.single_use_transaction:type_name -> google.spanner.v1.TransactionOptions
- 47, // 40: google.spanner.v1.CommitRequest.mutations:type_name -> google.spanner.v1.Mutation
- 48, // 41: google.spanner.v1.CommitRequest.max_commit_delay:type_name -> google.protobuf.Duration
- 13, // 42: google.spanner.v1.CommitRequest.request_options:type_name -> google.spanner.v1.RequestOptions
- 13, // 43: google.spanner.v1.BatchWriteRequest.request_options:type_name -> google.spanner.v1.RequestOptions
- 38, // 44: google.spanner.v1.BatchWriteRequest.mutation_groups:type_name -> google.spanner.v1.BatchWriteRequest.MutationGroup
- 43, // 45: google.spanner.v1.BatchWriteResponse.status:type_name -> google.rpc.Status
- 39, // 46: google.spanner.v1.BatchWriteResponse.commit_timestamp:type_name -> google.protobuf.Timestamp
- 1, // 47: google.spanner.v1.DirectedReadOptions.ReplicaSelection.type:type_name -> google.spanner.v1.DirectedReadOptions.ReplicaSelection.Type
- 30, // 48: google.spanner.v1.DirectedReadOptions.IncludeReplicas.replica_selections:type_name -> google.spanner.v1.DirectedReadOptions.ReplicaSelection
- 30, // 49: google.spanner.v1.DirectedReadOptions.ExcludeReplicas.replica_selections:type_name -> google.spanner.v1.DirectedReadOptions.ReplicaSelection
- 49, // 50: google.spanner.v1.ExecuteSqlRequest.ParamTypesEntry.value:type_name -> google.spanner.v1.Type
- 41, // 51: google.spanner.v1.ExecuteBatchDmlRequest.Statement.params:type_name -> google.protobuf.Struct
- 36, // 52: google.spanner.v1.ExecuteBatchDmlRequest.Statement.param_types:type_name -> google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry
- 49, // 53: google.spanner.v1.ExecuteBatchDmlRequest.Statement.ParamTypesEntry.value:type_name -> google.spanner.v1.Type
- 49, // 54: google.spanner.v1.PartitionQueryRequest.ParamTypesEntry.value:type_name -> google.spanner.v1.Type
- 47, // 55: google.spanner.v1.BatchWriteRequest.MutationGroup.mutations:type_name -> google.spanner.v1.Mutation
- 5, // 56: google.spanner.v1.Spanner.CreateSession:input_type -> google.spanner.v1.CreateSessionRequest
- 6, // 57: google.spanner.v1.Spanner.BatchCreateSessions:input_type -> google.spanner.v1.BatchCreateSessionsRequest
- 9, // 58: google.spanner.v1.Spanner.GetSession:input_type -> google.spanner.v1.GetSessionRequest
- 10, // 59: google.spanner.v1.Spanner.ListSessions:input_type -> google.spanner.v1.ListSessionsRequest
- 12, // 60: google.spanner.v1.Spanner.DeleteSession:input_type -> google.spanner.v1.DeleteSessionRequest
- 15, // 61: google.spanner.v1.Spanner.ExecuteSql:input_type -> google.spanner.v1.ExecuteSqlRequest
- 15, // 62: google.spanner.v1.Spanner.ExecuteStreamingSql:input_type -> google.spanner.v1.ExecuteSqlRequest
- 16, // 63: google.spanner.v1.Spanner.ExecuteBatchDml:input_type -> google.spanner.v1.ExecuteBatchDmlRequest
- 23, // 64: google.spanner.v1.Spanner.Read:input_type -> google.spanner.v1.ReadRequest
- 23, // 65: google.spanner.v1.Spanner.StreamingRead:input_type -> google.spanner.v1.ReadRequest
- 24, // 66: google.spanner.v1.Spanner.BeginTransaction:input_type -> google.spanner.v1.BeginTransactionRequest
- 25, // 67: google.spanner.v1.Spanner.Commit:input_type -> google.spanner.v1.CommitRequest
- 26, // 68: google.spanner.v1.Spanner.Rollback:input_type -> google.spanner.v1.RollbackRequest
- 19, // 69: google.spanner.v1.Spanner.PartitionQuery:input_type -> google.spanner.v1.PartitionQueryRequest
- 20, // 70: google.spanner.v1.Spanner.PartitionRead:input_type -> google.spanner.v1.PartitionReadRequest
- 27, // 71: google.spanner.v1.Spanner.BatchWrite:input_type -> google.spanner.v1.BatchWriteRequest
- 8, // 72: google.spanner.v1.Spanner.CreateSession:output_type -> google.spanner.v1.Session
- 7, // 73: google.spanner.v1.Spanner.BatchCreateSessions:output_type -> google.spanner.v1.BatchCreateSessionsResponse
- 8, // 74: google.spanner.v1.Spanner.GetSession:output_type -> google.spanner.v1.Session
- 11, // 75: google.spanner.v1.Spanner.ListSessions:output_type -> google.spanner.v1.ListSessionsResponse
- 50, // 76: google.spanner.v1.Spanner.DeleteSession:output_type -> google.protobuf.Empty
- 42, // 77: google.spanner.v1.Spanner.ExecuteSql:output_type -> google.spanner.v1.ResultSet
- 51, // 78: google.spanner.v1.Spanner.ExecuteStreamingSql:output_type -> google.spanner.v1.PartialResultSet
- 17, // 79: google.spanner.v1.Spanner.ExecuteBatchDml:output_type -> google.spanner.v1.ExecuteBatchDmlResponse
- 42, // 80: google.spanner.v1.Spanner.Read:output_type -> google.spanner.v1.ResultSet
- 51, // 81: google.spanner.v1.Spanner.StreamingRead:output_type -> google.spanner.v1.PartialResultSet
- 45, // 82: google.spanner.v1.Spanner.BeginTransaction:output_type -> google.spanner.v1.Transaction
- 52, // 83: google.spanner.v1.Spanner.Commit:output_type -> google.spanner.v1.CommitResponse
- 50, // 84: google.spanner.v1.Spanner.Rollback:output_type -> google.protobuf.Empty
- 22, // 85: google.spanner.v1.Spanner.PartitionQuery:output_type -> google.spanner.v1.PartitionResponse
- 22, // 86: google.spanner.v1.Spanner.PartitionRead:output_type -> google.spanner.v1.PartitionResponse
- 28, // 87: google.spanner.v1.Spanner.BatchWrite:output_type -> google.spanner.v1.BatchWriteResponse
- 72, // [72:88] is the sub-list for method output_type
- 56, // [56:72] is the sub-list for method input_type
- 56, // [56:56] is the sub-list for extension type_name
- 56, // [56:56] is the sub-list for extension extendee
- 0, // [0:56] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_spanner_proto_init() }
-func file_google_spanner_v1_spanner_proto_init() {
- if File_google_spanner_v1_spanner_proto != nil {
- return
- }
- file_google_spanner_v1_commit_response_proto_init()
- file_google_spanner_v1_keys_proto_init()
- file_google_spanner_v1_mutation_proto_init()
- file_google_spanner_v1_result_set_proto_init()
- file_google_spanner_v1_transaction_proto_init()
- file_google_spanner_v1_type_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_spanner_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*CreateSessionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*BatchCreateSessionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*BatchCreateSessionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*Session); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*GetSessionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*ListSessionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*ListSessionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[7].Exporter = func(v any, i int) any {
- switch v := v.(*DeleteSessionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[8].Exporter = func(v any, i int) any {
- switch v := v.(*RequestOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[9].Exporter = func(v any, i int) any {
- switch v := v.(*DirectedReadOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[10].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteSqlRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[11].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteBatchDmlRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[12].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteBatchDmlResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[13].Exporter = func(v any, i int) any {
- switch v := v.(*PartitionOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[14].Exporter = func(v any, i int) any {
- switch v := v.(*PartitionQueryRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[15].Exporter = func(v any, i int) any {
- switch v := v.(*PartitionReadRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[16].Exporter = func(v any, i int) any {
- switch v := v.(*Partition); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[17].Exporter = func(v any, i int) any {
- switch v := v.(*PartitionResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[18].Exporter = func(v any, i int) any {
- switch v := v.(*ReadRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[19].Exporter = func(v any, i int) any {
- switch v := v.(*BeginTransactionRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[20].Exporter = func(v any, i int) any {
- switch v := v.(*CommitRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[21].Exporter = func(v any, i int) any {
- switch v := v.(*RollbackRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[22].Exporter = func(v any, i int) any {
- switch v := v.(*BatchWriteRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[23].Exporter = func(v any, i int) any {
- switch v := v.(*BatchWriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[25].Exporter = func(v any, i int) any {
- switch v := v.(*DirectedReadOptions_ReplicaSelection); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[26].Exporter = func(v any, i int) any {
- switch v := v.(*DirectedReadOptions_IncludeReplicas); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[27].Exporter = func(v any, i int) any {
- switch v := v.(*DirectedReadOptions_ExcludeReplicas); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[28].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteSqlRequest_QueryOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[30].Exporter = func(v any, i int) any {
- switch v := v.(*ExecuteBatchDmlRequest_Statement); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[33].Exporter = func(v any, i int) any {
- switch v := v.(*BatchWriteRequest_MutationGroup); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_v1_spanner_proto_msgTypes[9].OneofWrappers = []any{
- (*DirectedReadOptions_IncludeReplicas_)(nil),
- (*DirectedReadOptions_ExcludeReplicas_)(nil),
- }
- file_google_spanner_v1_spanner_proto_msgTypes[20].OneofWrappers = []any{
- (*CommitRequest_TransactionId)(nil),
- (*CommitRequest_SingleUseTransaction)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_spanner_proto_rawDesc,
- NumEnums: 5,
- NumMessages: 34,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_google_spanner_v1_spanner_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_spanner_proto_depIdxs,
- EnumInfos: file_google_spanner_v1_spanner_proto_enumTypes,
- MessageInfos: file_google_spanner_v1_spanner_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_spanner_proto = out.File
- file_google_spanner_v1_spanner_proto_rawDesc = nil
- file_google_spanner_v1_spanner_proto_goTypes = nil
- file_google_spanner_v1_spanner_proto_depIdxs = nil
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConnInterface
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion6
-
-// SpannerClient is the client API for Spanner service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type SpannerClient interface {
- // Creates a new session. A session can be used to perform
- // transactions that read and/or modify data in a Cloud Spanner database.
- // Sessions are meant to be reused for many consecutive
- // transactions.
- //
- // Sessions can only execute one transaction at a time. To execute
- // multiple concurrent read-write/write-only transactions, create
- // multiple sessions. Note that standalone reads and queries use a
- // transaction internally, and count toward the one transaction
- // limit.
- //
- // Active sessions use additional server resources, so it is a good idea to
- // delete idle and unneeded sessions.
- // Aside from explicit deletes, Cloud Spanner may delete sessions for which no
- // operations are sent for more than an hour. If a session is deleted,
- // requests to it return `NOT_FOUND`.
- //
- // Idle sessions can be kept alive by sending a trivial SQL query
- // periodically, e.g., `"SELECT 1"`.
- CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error)
- // Creates multiple new sessions.
- //
- // This API can be used to initialize a session cache on the clients.
- // See https://goo.gl/TgSFN2 for best practices on session cache management.
- BatchCreateSessions(ctx context.Context, in *BatchCreateSessionsRequest, opts ...grpc.CallOption) (*BatchCreateSessionsResponse, error)
- // Gets a session. Returns `NOT_FOUND` if the session does not exist.
- // This is mainly useful for determining whether a session is still
- // alive.
- GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error)
- // Lists all sessions in a given database.
- ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error)
- // Ends a session, releasing server resources associated with it. This will
- // asynchronously trigger cancellation of any operations that are running with
- // this session.
- DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Executes an SQL statement, returning all results in a single reply. This
- // method cannot be used to return a result set larger than 10 MiB;
- // if the query yields more data than that, the query fails with
- // a `FAILED_PRECONDITION` error.
- //
- // Operations inside read-write transactions might return `ABORTED`. If
- // this occurs, the application should restart the transaction from
- // the beginning. See [Transaction][google.spanner.v1.Transaction] for more
- // details.
- //
- // Larger result sets can be fetched in streaming fashion by calling
- // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
- // instead.
- ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error)
- // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
- // result set as a stream. Unlike
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
- // the size of the returned result set. However, no individual row in the
- // result set can exceed 100 MiB, and no column value can exceed 10 MiB.
- ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error)
- // Executes a batch of SQL DML statements. This method allows many statements
- // to be run with lower latency than submitting them sequentially with
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
- //
- // Statements are executed in sequential order. A request can succeed even if
- // a statement fails. The
- // [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
- // field in the response provides information about the statement that failed.
- // Clients must inspect this field to determine whether an error occurred.
- //
- // Execution stops after the first failed statement; the remaining statements
- // are not executed.
- ExecuteBatchDml(ctx context.Context, in *ExecuteBatchDmlRequest, opts ...grpc.CallOption) (*ExecuteBatchDmlResponse, error)
- // Reads rows from the database using key lookups and scans, as a
- // simple key/value style alternative to
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
- // used to return a result set larger than 10 MiB; if the read matches more
- // data than that, the read fails with a `FAILED_PRECONDITION`
- // error.
- //
- // Reads inside read-write transactions might return `ABORTED`. If
- // this occurs, the application should restart the transaction from
- // the beginning. See [Transaction][google.spanner.v1.Transaction] for more
- // details.
- //
- // Larger result sets can be yielded in streaming fashion by calling
- // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
- Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error)
- // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
- // as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
- // limit on the size of the returned result set. However, no individual row in
- // the result set can exceed 100 MiB, and no column value can exceed
- // 10 MiB.
- StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error)
- // Begins a new transaction. This step can often be skipped:
- // [Read][google.spanner.v1.Spanner.Read],
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
- // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
- // side-effect.
- BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error)
- // Commits a transaction. The request includes the mutations to be
- // applied to rows in the database.
- //
- // `Commit` might return an `ABORTED` error. This can occur at any time;
- // commonly, the cause is conflicts with concurrent
- // transactions. However, it can also happen for a variety of other
- // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
- // the transaction from the beginning, re-using the same session.
- //
- // On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
- // for example, if the client job experiences a 1+ hour networking failure.
- // At that point, Cloud Spanner has lost track of the transaction outcome and
- // we recommend that you perform another read from the database to see the
- // state of things as they are now.
- Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error)
- // Rolls back a transaction, releasing any locks it holds. It is a good
- // idea to call this for any transaction that includes one or more
- // [Read][google.spanner.v1.Spanner.Read] or
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
- // decides not to commit.
- //
- // `Rollback` returns `OK` if it successfully aborts the transaction, the
- // transaction was already aborted, or the transaction is not
- // found. `Rollback` never returns `ABORTED`.
- Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Creates a set of partition tokens that can be used to execute a query
- // operation in parallel. Each of the returned partition tokens can be used
- // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
- // specify a subset of the query result to read. The same session and
- // read-only transaction must be used by the PartitionQueryRequest used to
- // create the partition tokens and the ExecuteSqlRequests that use the
- // partition tokens.
- //
- // Partition tokens become invalid when the session used to create them
- // is deleted, is idle for too long, begins a new transaction, or becomes too
- // old. When any of these happen, it is not possible to resume the query, and
- // the whole operation must be restarted from the beginning.
- PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
- // Creates a set of partition tokens that can be used to execute a read
- // operation in parallel. Each of the returned partition tokens can be used
- // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
- // subset of the read result to read. The same session and read-only
- // transaction must be used by the PartitionReadRequest used to create the
- // partition tokens and the ReadRequests that use the partition tokens. There
- // are no ordering guarantees on rows returned among the returned partition
- // tokens, or even within each individual StreamingRead call issued with a
- // partition_token.
- //
- // Partition tokens become invalid when the session used to create them
- // is deleted, is idle for too long, begins a new transaction, or becomes too
- // old. When any of these happen, it is not possible to resume the read, and
- // the whole operation must be restarted from the beginning.
- PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error)
- // Batches the supplied mutation groups in a collection of efficient
- // transactions. All mutations in a group are committed atomically. However,
- // mutations across groups can be committed non-atomically in an unspecified
- // order and thus, they must be independent of each other. Partial failure is
- // possible, i.e., some groups may have been committed successfully, while
- // some may have failed. The results of individual batches are streamed into
- // the response as the batches are applied.
- //
- // BatchWrite requests are not replay protected, meaning that each mutation
- // group may be applied more than once. Replays of non-idempotent mutations
- // may have undesirable effects. For example, replays of an insert mutation
- // may produce an already exists error or if you use generated or commit
- // timestamp-based keys, it may result in additional rows being added to the
- // mutation's table. We recommend structuring your mutation groups to be
- // idempotent to avoid this issue.
- BatchWrite(ctx context.Context, in *BatchWriteRequest, opts ...grpc.CallOption) (Spanner_BatchWriteClient, error)
-}
-
-type spannerClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewSpannerClient(cc grpc.ClientConnInterface) SpannerClient {
- return &spannerClient{cc}
-}
-
-func (c *spannerClient) CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*Session, error) {
- out := new(Session)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/CreateSession", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) BatchCreateSessions(ctx context.Context, in *BatchCreateSessionsRequest, opts ...grpc.CallOption) (*BatchCreateSessionsResponse, error) {
- out := new(BatchCreateSessionsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/BatchCreateSessions", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error) {
- out := new(Session)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/GetSession", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error) {
- out := new(ListSessionsResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/ListSessions", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/DeleteSession", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) ExecuteSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (*ResultSet, error) {
- out := new(ResultSet)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/ExecuteSql", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) ExecuteStreamingSql(ctx context.Context, in *ExecuteSqlRequest, opts ...grpc.CallOption) (Spanner_ExecuteStreamingSqlClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Spanner_serviceDesc.Streams[0], "/google.spanner.v1.Spanner/ExecuteStreamingSql", opts...)
- if err != nil {
- return nil, err
- }
- x := &spannerExecuteStreamingSqlClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Spanner_ExecuteStreamingSqlClient interface {
- Recv() (*PartialResultSet, error)
- grpc.ClientStream
-}
-
-type spannerExecuteStreamingSqlClient struct {
- grpc.ClientStream
-}
-
-func (x *spannerExecuteStreamingSqlClient) Recv() (*PartialResultSet, error) {
- m := new(PartialResultSet)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *spannerClient) ExecuteBatchDml(ctx context.Context, in *ExecuteBatchDmlRequest, opts ...grpc.CallOption) (*ExecuteBatchDmlResponse, error) {
- out := new(ExecuteBatchDmlResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/ExecuteBatchDml", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ResultSet, error) {
- out := new(ResultSet)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/Read", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) StreamingRead(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (Spanner_StreamingReadClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Spanner_serviceDesc.Streams[1], "/google.spanner.v1.Spanner/StreamingRead", opts...)
- if err != nil {
- return nil, err
- }
- x := &spannerStreamingReadClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Spanner_StreamingReadClient interface {
- Recv() (*PartialResultSet, error)
- grpc.ClientStream
-}
-
-type spannerStreamingReadClient struct {
- grpc.ClientStream
-}
-
-func (x *spannerStreamingReadClient) Recv() (*PartialResultSet, error) {
- m := new(PartialResultSet)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *spannerClient) BeginTransaction(ctx context.Context, in *BeginTransactionRequest, opts ...grpc.CallOption) (*Transaction, error) {
- out := new(Transaction)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/BeginTransaction", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) Commit(ctx context.Context, in *CommitRequest, opts ...grpc.CallOption) (*CommitResponse, error) {
- out := new(CommitResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/Commit", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) Rollback(ctx context.Context, in *RollbackRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/Rollback", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) PartitionQuery(ctx context.Context, in *PartitionQueryRequest, opts ...grpc.CallOption) (*PartitionResponse, error) {
- out := new(PartitionResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/PartitionQuery", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) PartitionRead(ctx context.Context, in *PartitionReadRequest, opts ...grpc.CallOption) (*PartitionResponse, error) {
- out := new(PartitionResponse)
- err := c.cc.Invoke(ctx, "/google.spanner.v1.Spanner/PartitionRead", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *spannerClient) BatchWrite(ctx context.Context, in *BatchWriteRequest, opts ...grpc.CallOption) (Spanner_BatchWriteClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Spanner_serviceDesc.Streams[2], "/google.spanner.v1.Spanner/BatchWrite", opts...)
- if err != nil {
- return nil, err
- }
- x := &spannerBatchWriteClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Spanner_BatchWriteClient interface {
- Recv() (*BatchWriteResponse, error)
- grpc.ClientStream
-}
-
-type spannerBatchWriteClient struct {
- grpc.ClientStream
-}
-
-func (x *spannerBatchWriteClient) Recv() (*BatchWriteResponse, error) {
- m := new(BatchWriteResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// SpannerServer is the server API for Spanner service.
-type SpannerServer interface {
- // Creates a new session. A session can be used to perform
- // transactions that read and/or modify data in a Cloud Spanner database.
- // Sessions are meant to be reused for many consecutive
- // transactions.
- //
- // Sessions can only execute one transaction at a time. To execute
- // multiple concurrent read-write/write-only transactions, create
- // multiple sessions. Note that standalone reads and queries use a
- // transaction internally, and count toward the one transaction
- // limit.
- //
- // Active sessions use additional server resources, so it is a good idea to
- // delete idle and unneeded sessions.
- // Aside from explicit deletes, Cloud Spanner may delete sessions for which no
- // operations are sent for more than an hour. If a session is deleted,
- // requests to it return `NOT_FOUND`.
- //
- // Idle sessions can be kept alive by sending a trivial SQL query
- // periodically, e.g., `"SELECT 1"`.
- CreateSession(context.Context, *CreateSessionRequest) (*Session, error)
- // Creates multiple new sessions.
- //
- // This API can be used to initialize a session cache on the clients.
- // See https://goo.gl/TgSFN2 for best practices on session cache management.
- BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error)
- // Gets a session. Returns `NOT_FOUND` if the session does not exist.
- // This is mainly useful for determining whether a session is still
- // alive.
- GetSession(context.Context, *GetSessionRequest) (*Session, error)
- // Lists all sessions in a given database.
- ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
- // Ends a session, releasing server resources associated with it. This will
- // asynchronously trigger cancellation of any operations that are running with
- // this session.
- DeleteSession(context.Context, *DeleteSessionRequest) (*emptypb.Empty, error)
- // Executes an SQL statement, returning all results in a single reply. This
- // method cannot be used to return a result set larger than 10 MiB;
- // if the query yields more data than that, the query fails with
- // a `FAILED_PRECONDITION` error.
- //
- // Operations inside read-write transactions might return `ABORTED`. If
- // this occurs, the application should restart the transaction from
- // the beginning. See [Transaction][google.spanner.v1.Transaction] for more
- // details.
- //
- // Larger result sets can be fetched in streaming fashion by calling
- // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
- // instead.
- ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error)
- // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the
- // result set as a stream. Unlike
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on
- // the size of the returned result set. However, no individual row in the
- // result set can exceed 100 MiB, and no column value can exceed 10 MiB.
- ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error
- // Executes a batch of SQL DML statements. This method allows many statements
- // to be run with lower latency than submitting them sequentially with
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql].
- //
- // Statements are executed in sequential order. A request can succeed even if
- // a statement fails. The
- // [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status]
- // field in the response provides information about the statement that failed.
- // Clients must inspect this field to determine whether an error occurred.
- //
- // Execution stops after the first failed statement; the remaining statements
- // are not executed.
- ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error)
- // Reads rows from the database using key lookups and scans, as a
- // simple key/value style alternative to
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be
- // used to return a result set larger than 10 MiB; if the read matches more
- // data than that, the read fails with a `FAILED_PRECONDITION`
- // error.
- //
- // Reads inside read-write transactions might return `ABORTED`. If
- // this occurs, the application should restart the transaction from
- // the beginning. See [Transaction][google.spanner.v1.Transaction] for more
- // details.
- //
- // Larger result sets can be yielded in streaming fashion by calling
- // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead.
- Read(context.Context, *ReadRequest) (*ResultSet, error)
- // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set
- // as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no
- // limit on the size of the returned result set. However, no individual row in
- // the result set can exceed 100 MiB, and no column value can exceed
- // 10 MiB.
- StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error
- // Begins a new transaction. This step can often be skipped:
- // [Read][google.spanner.v1.Spanner.Read],
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and
- // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a
- // side-effect.
- BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error)
- // Commits a transaction. The request includes the mutations to be
- // applied to rows in the database.
- //
- // `Commit` might return an `ABORTED` error. This can occur at any time;
- // commonly, the cause is conflicts with concurrent
- // transactions. However, it can also happen for a variety of other
- // reasons. If `Commit` returns `ABORTED`, the caller should re-attempt
- // the transaction from the beginning, re-using the same session.
- //
- // On very rare occasions, `Commit` might return `UNKNOWN`. This can happen,
- // for example, if the client job experiences a 1+ hour networking failure.
- // At that point, Cloud Spanner has lost track of the transaction outcome and
- // we recommend that you perform another read from the database to see the
- // state of things as they are now.
- Commit(context.Context, *CommitRequest) (*CommitResponse, error)
- // Rolls back a transaction, releasing any locks it holds. It is a good
- // idea to call this for any transaction that includes one or more
- // [Read][google.spanner.v1.Spanner.Read] or
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately
- // decides not to commit.
- //
- // `Rollback` returns `OK` if it successfully aborts the transaction, the
- // transaction was already aborted, or the transaction is not
- // found. `Rollback` never returns `ABORTED`.
- Rollback(context.Context, *RollbackRequest) (*emptypb.Empty, error)
- // Creates a set of partition tokens that can be used to execute a query
- // operation in parallel. Each of the returned partition tokens can be used
- // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to
- // specify a subset of the query result to read. The same session and
- // read-only transaction must be used by the PartitionQueryRequest used to
- // create the partition tokens and the ExecuteSqlRequests that use the
- // partition tokens.
- //
- // Partition tokens become invalid when the session used to create them
- // is deleted, is idle for too long, begins a new transaction, or becomes too
- // old. When any of these happen, it is not possible to resume the query, and
- // the whole operation must be restarted from the beginning.
- PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error)
- // Creates a set of partition tokens that can be used to execute a read
- // operation in parallel. Each of the returned partition tokens can be used
- // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a
- // subset of the read result to read. The same session and read-only
- // transaction must be used by the PartitionReadRequest used to create the
- // partition tokens and the ReadRequests that use the partition tokens. There
- // are no ordering guarantees on rows returned among the returned partition
- // tokens, or even within each individual StreamingRead call issued with a
- // partition_token.
- //
- // Partition tokens become invalid when the session used to create them
- // is deleted, is idle for too long, begins a new transaction, or becomes too
- // old. When any of these happen, it is not possible to resume the read, and
- // the whole operation must be restarted from the beginning.
- PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error)
- // Batches the supplied mutation groups in a collection of efficient
- // transactions. All mutations in a group are committed atomically. However,
- // mutations across groups can be committed non-atomically in an unspecified
- // order and thus, they must be independent of each other. Partial failure is
- // possible, i.e., some groups may have been committed successfully, while
- // some may have failed. The results of individual batches are streamed into
- // the response as the batches are applied.
- //
- // BatchWrite requests are not replay protected, meaning that each mutation
- // group may be applied more than once. Replays of non-idempotent mutations
- // may have undesirable effects. For example, replays of an insert mutation
- // may produce an already exists error or if you use generated or commit
- // timestamp-based keys, it may result in additional rows being added to the
- // mutation's table. We recommend structuring your mutation groups to be
- // idempotent to avoid this issue.
- BatchWrite(*BatchWriteRequest, Spanner_BatchWriteServer) error
-}
-
-// UnimplementedSpannerServer can be embedded to have forward compatible implementations.
-type UnimplementedSpannerServer struct {
-}
-
-func (*UnimplementedSpannerServer) CreateSession(context.Context, *CreateSessionRequest) (*Session, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method CreateSession not implemented")
-}
-func (*UnimplementedSpannerServer) BatchCreateSessions(context.Context, *BatchCreateSessionsRequest) (*BatchCreateSessionsResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method BatchCreateSessions not implemented")
-}
-func (*UnimplementedSpannerServer) GetSession(context.Context, *GetSessionRequest) (*Session, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method GetSession not implemented")
-}
-func (*UnimplementedSpannerServer) ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method ListSessions not implemented")
-}
-func (*UnimplementedSpannerServer) DeleteSession(context.Context, *DeleteSessionRequest) (*emptypb.Empty, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method DeleteSession not implemented")
-}
-func (*UnimplementedSpannerServer) ExecuteSql(context.Context, *ExecuteSqlRequest) (*ResultSet, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method ExecuteSql not implemented")
-}
-func (*UnimplementedSpannerServer) ExecuteStreamingSql(*ExecuteSqlRequest, Spanner_ExecuteStreamingSqlServer) error {
- return status1.Errorf(codes.Unimplemented, "method ExecuteStreamingSql not implemented")
-}
-func (*UnimplementedSpannerServer) ExecuteBatchDml(context.Context, *ExecuteBatchDmlRequest) (*ExecuteBatchDmlResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method ExecuteBatchDml not implemented")
-}
-func (*UnimplementedSpannerServer) Read(context.Context, *ReadRequest) (*ResultSet, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method Read not implemented")
-}
-func (*UnimplementedSpannerServer) StreamingRead(*ReadRequest, Spanner_StreamingReadServer) error {
- return status1.Errorf(codes.Unimplemented, "method StreamingRead not implemented")
-}
-func (*UnimplementedSpannerServer) BeginTransaction(context.Context, *BeginTransactionRequest) (*Transaction, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method BeginTransaction not implemented")
-}
-func (*UnimplementedSpannerServer) Commit(context.Context, *CommitRequest) (*CommitResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method Commit not implemented")
-}
-func (*UnimplementedSpannerServer) Rollback(context.Context, *RollbackRequest) (*emptypb.Empty, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method Rollback not implemented")
-}
-func (*UnimplementedSpannerServer) PartitionQuery(context.Context, *PartitionQueryRequest) (*PartitionResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method PartitionQuery not implemented")
-}
-func (*UnimplementedSpannerServer) PartitionRead(context.Context, *PartitionReadRequest) (*PartitionResponse, error) {
- return nil, status1.Errorf(codes.Unimplemented, "method PartitionRead not implemented")
-}
-func (*UnimplementedSpannerServer) BatchWrite(*BatchWriteRequest, Spanner_BatchWriteServer) error {
- return status1.Errorf(codes.Unimplemented, "method BatchWrite not implemented")
-}
-
-func RegisterSpannerServer(s *grpc.Server, srv SpannerServer) {
- s.RegisterService(&_Spanner_serviceDesc, srv)
-}
-
-func _Spanner_CreateSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateSessionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).CreateSession(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/CreateSession",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).CreateSession(ctx, req.(*CreateSessionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_BatchCreateSessions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(BatchCreateSessionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).BatchCreateSessions(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/BatchCreateSessions",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).BatchCreateSessions(ctx, req.(*BatchCreateSessionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_GetSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetSessionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).GetSession(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/GetSession",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).GetSession(ctx, req.(*GetSessionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_ListSessions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListSessionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).ListSessions(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/ListSessions",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).ListSessions(ctx, req.(*ListSessionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_DeleteSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteSessionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).DeleteSession(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/DeleteSession",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).DeleteSession(ctx, req.(*DeleteSessionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_ExecuteSql_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExecuteSqlRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).ExecuteSql(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/ExecuteSql",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).ExecuteSql(ctx, req.(*ExecuteSqlRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_ExecuteStreamingSql_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(ExecuteSqlRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(SpannerServer).ExecuteStreamingSql(m, &spannerExecuteStreamingSqlServer{stream})
-}
-
-type Spanner_ExecuteStreamingSqlServer interface {
- Send(*PartialResultSet) error
- grpc.ServerStream
-}
-
-type spannerExecuteStreamingSqlServer struct {
- grpc.ServerStream
-}
-
-func (x *spannerExecuteStreamingSqlServer) Send(m *PartialResultSet) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Spanner_ExecuteBatchDml_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ExecuteBatchDmlRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).ExecuteBatchDml(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/ExecuteBatchDml",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).ExecuteBatchDml(ctx, req.(*ExecuteBatchDmlRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ReadRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).Read(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/Read",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).Read(ctx, req.(*ReadRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_StreamingRead_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(ReadRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(SpannerServer).StreamingRead(m, &spannerStreamingReadServer{stream})
-}
-
-type Spanner_StreamingReadServer interface {
- Send(*PartialResultSet) error
- grpc.ServerStream
-}
-
-type spannerStreamingReadServer struct {
- grpc.ServerStream
-}
-
-func (x *spannerStreamingReadServer) Send(m *PartialResultSet) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Spanner_BeginTransaction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(BeginTransactionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).BeginTransaction(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/BeginTransaction",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).BeginTransaction(ctx, req.(*BeginTransactionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CommitRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).Commit(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/Commit",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).Commit(ctx, req.(*CommitRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_Rollback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RollbackRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).Rollback(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/Rollback",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).Rollback(ctx, req.(*RollbackRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_PartitionQuery_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(PartitionQueryRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).PartitionQuery(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/PartitionQuery",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).PartitionQuery(ctx, req.(*PartitionQueryRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_PartitionRead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(PartitionReadRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SpannerServer).PartitionRead(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.spanner.v1.Spanner/PartitionRead",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SpannerServer).PartitionRead(ctx, req.(*PartitionReadRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Spanner_BatchWrite_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(BatchWriteRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(SpannerServer).BatchWrite(m, &spannerBatchWriteServer{stream})
-}
-
-type Spanner_BatchWriteServer interface {
- Send(*BatchWriteResponse) error
- grpc.ServerStream
-}
-
-type spannerBatchWriteServer struct {
- grpc.ServerStream
-}
-
-func (x *spannerBatchWriteServer) Send(m *BatchWriteResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-var _Spanner_serviceDesc = grpc.ServiceDesc{
- ServiceName: "google.spanner.v1.Spanner",
- HandlerType: (*SpannerServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "CreateSession",
- Handler: _Spanner_CreateSession_Handler,
- },
- {
- MethodName: "BatchCreateSessions",
- Handler: _Spanner_BatchCreateSessions_Handler,
- },
- {
- MethodName: "GetSession",
- Handler: _Spanner_GetSession_Handler,
- },
- {
- MethodName: "ListSessions",
- Handler: _Spanner_ListSessions_Handler,
- },
- {
- MethodName: "DeleteSession",
- Handler: _Spanner_DeleteSession_Handler,
- },
- {
- MethodName: "ExecuteSql",
- Handler: _Spanner_ExecuteSql_Handler,
- },
- {
- MethodName: "ExecuteBatchDml",
- Handler: _Spanner_ExecuteBatchDml_Handler,
- },
- {
- MethodName: "Read",
- Handler: _Spanner_Read_Handler,
- },
- {
- MethodName: "BeginTransaction",
- Handler: _Spanner_BeginTransaction_Handler,
- },
- {
- MethodName: "Commit",
- Handler: _Spanner_Commit_Handler,
- },
- {
- MethodName: "Rollback",
- Handler: _Spanner_Rollback_Handler,
- },
- {
- MethodName: "PartitionQuery",
- Handler: _Spanner_PartitionQuery_Handler,
- },
- {
- MethodName: "PartitionRead",
- Handler: _Spanner_PartitionRead_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "ExecuteStreamingSql",
- Handler: _Spanner_ExecuteStreamingSql_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "StreamingRead",
- Handler: _Spanner_StreamingRead_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "BatchWrite",
- Handler: _Spanner_BatchWrite_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "google/spanner/v1/spanner.proto",
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/transaction.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/transaction.pb.go
deleted file mode 100644
index 829120c1c..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/transaction.pb.go
+++ /dev/null
@@ -1,1278 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/transaction.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- timestamppb "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// `ReadLockMode` is used to set the read lock mode for read-write
-// transactions.
-type TransactionOptions_ReadWrite_ReadLockMode int32
-
-const (
- // Default value.
- //
- // If the value is not specified, the pessimistic read lock is used.
- TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED TransactionOptions_ReadWrite_ReadLockMode = 0
- // Pessimistic lock mode.
- //
- // Read locks are acquired immediately on read.
- TransactionOptions_ReadWrite_PESSIMISTIC TransactionOptions_ReadWrite_ReadLockMode = 1
- // Optimistic lock mode.
- //
- // Locks for reads within the transaction are not acquired on read.
- // Instead the locks are acquired on a commit to validate that
- // read/queried data has not changed since the transaction started.
- TransactionOptions_ReadWrite_OPTIMISTIC TransactionOptions_ReadWrite_ReadLockMode = 2
-)
-
-// Enum value maps for TransactionOptions_ReadWrite_ReadLockMode.
-var (
- TransactionOptions_ReadWrite_ReadLockMode_name = map[int32]string{
- 0: "READ_LOCK_MODE_UNSPECIFIED",
- 1: "PESSIMISTIC",
- 2: "OPTIMISTIC",
- }
- TransactionOptions_ReadWrite_ReadLockMode_value = map[string]int32{
- "READ_LOCK_MODE_UNSPECIFIED": 0,
- "PESSIMISTIC": 1,
- "OPTIMISTIC": 2,
- }
-)
-
-func (x TransactionOptions_ReadWrite_ReadLockMode) Enum() *TransactionOptions_ReadWrite_ReadLockMode {
- p := new(TransactionOptions_ReadWrite_ReadLockMode)
- *p = x
- return p
-}
-
-func (x TransactionOptions_ReadWrite_ReadLockMode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TransactionOptions_ReadWrite_ReadLockMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_transaction_proto_enumTypes[0].Descriptor()
-}
-
-func (TransactionOptions_ReadWrite_ReadLockMode) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_transaction_proto_enumTypes[0]
-}
-
-func (x TransactionOptions_ReadWrite_ReadLockMode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TransactionOptions_ReadWrite_ReadLockMode.Descriptor instead.
-func (TransactionOptions_ReadWrite_ReadLockMode) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{0, 0, 0}
-}
-
-// Transactions:
-//
-// Each session can have at most one active transaction at a time (note that
-// standalone reads and queries use a transaction internally and do count
-// towards the one transaction limit). After the active transaction is
-// completed, the session can immediately be re-used for the next transaction.
-// It is not necessary to create a new session for each transaction.
-//
-// Transaction modes:
-//
-// Cloud Spanner supports three transaction modes:
-//
-// 1. Locking read-write. This type of transaction is the only way
-// to write data into Cloud Spanner. These transactions rely on
-// pessimistic locking and, if necessary, two-phase commit.
-// Locking read-write transactions may abort, requiring the
-// application to retry.
-//
-// 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed
-// consistency across several reads, but do not allow
-// writes. Snapshot read-only transactions can be configured to read at
-// timestamps in the past, or configured to perform a strong read
-// (where Spanner will select a timestamp such that the read is
-// guaranteed to see the effects of all transactions that have committed
-// before the start of the read). Snapshot read-only transactions do not
-// need to be committed.
-//
-// Queries on change streams must be performed with the snapshot read-only
-// transaction mode, specifying a strong read. Please see
-// [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]
-// for more details.
-//
-// 3. Partitioned DML. This type of transaction is used to execute
-// a single Partitioned DML statement. Partitioned DML partitions
-// the key space and runs the DML statement over each partition
-// in parallel using separate, internal transactions that commit
-// independently. Partitioned DML transactions do not need to be
-// committed.
-//
-// For transactions that only read, snapshot read-only transactions
-// provide simpler semantics and are almost always faster. In
-// particular, read-only transactions do not take locks, so they do
-// not conflict with read-write transactions. As a consequence of not
-// taking locks, they also do not abort, so retry loops are not needed.
-//
-// Transactions may only read-write data in a single database. They
-// may, however, read-write data in different tables within that
-// database.
-//
-// Locking read-write transactions:
-//
-// Locking transactions may be used to atomically read-modify-write
-// data anywhere in a database. This type of transaction is externally
-// consistent.
-//
-// Clients should attempt to minimize the amount of time a transaction
-// is active. Faster transactions commit with higher probability
-// and cause less contention. Cloud Spanner attempts to keep read locks
-// active as long as the transaction continues to do reads, and the
-// transaction has not been terminated by
-// [Commit][google.spanner.v1.Spanner.Commit] or
-// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of
-// inactivity at the client may cause Cloud Spanner to release a
-// transaction's locks and abort it.
-//
-// Conceptually, a read-write transaction consists of zero or more
-// reads or SQL statements followed by
-// [Commit][google.spanner.v1.Spanner.Commit]. At any time before
-// [Commit][google.spanner.v1.Spanner.Commit], the client can send a
-// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the
-// transaction.
-//
-// Semantics:
-//
-// Cloud Spanner can commit the transaction if all read locks it acquired
-// are still valid at commit time, and it is able to acquire write
-// locks for all writes. Cloud Spanner can abort the transaction for any
-// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-// that the transaction has not modified any user data in Cloud Spanner.
-//
-// Unless the transaction commits, Cloud Spanner makes no guarantees about
-// how long the transaction's locks were held for. It is an error to
-// use Cloud Spanner locks for any sort of mutual exclusion other than
-// between Cloud Spanner transactions themselves.
-//
-// Retrying aborted transactions:
-//
-// When a transaction aborts, the application can choose to retry the
-// whole transaction again. To maximize the chances of successfully
-// committing the retry, the client should execute the retry in the
-// same session as the original attempt. The original session's lock
-// priority increases with each consecutive abort, meaning that each
-// attempt has a slightly better chance of success than the previous.
-//
-// Under some circumstances (for example, many transactions attempting to
-// modify the same row(s)), a transaction can abort many times in a
-// short period before successfully committing. Thus, it is not a good
-// idea to cap the number of retries a transaction can attempt;
-// instead, it is better to limit the total amount of time spent
-// retrying.
-//
-// Idle transactions:
-//
-// A transaction is considered idle if it has no outstanding reads or
-// SQL queries and has not started a read or SQL query within the last 10
-// seconds. Idle transactions can be aborted by Cloud Spanner so that they
-// don't hold on to locks indefinitely. If an idle transaction is aborted, the
-// commit will fail with error `ABORTED`.
-//
-// If this behavior is undesirable, periodically executing a simple
-// SQL query in the transaction (for example, `SELECT 1`) prevents the
-// transaction from becoming idle.
-//
-// Snapshot read-only transactions:
-//
-// Snapshot read-only transactions provides a simpler method than
-// locking read-write transactions for doing several consistent
-// reads. However, this type of transaction does not support writes.
-//
-// Snapshot transactions do not take locks. Instead, they work by
-// choosing a Cloud Spanner timestamp, then executing all reads at that
-// timestamp. Since they do not acquire locks, they do not block
-// concurrent read-write transactions.
-//
-// Unlike locking read-write transactions, snapshot read-only
-// transactions never abort. They can fail if the chosen read
-// timestamp is garbage collected; however, the default garbage
-// collection policy is generous enough that most applications do not
-// need to worry about this in practice.
-//
-// Snapshot read-only transactions do not need to call
-// [Commit][google.spanner.v1.Spanner.Commit] or
-// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not
-// permitted to do so).
-//
-// To execute a snapshot transaction, the client specifies a timestamp
-// bound, which tells Cloud Spanner how to choose a read timestamp.
-//
-// The types of timestamp bound are:
-//
-// - Strong (the default).
-// - Bounded staleness.
-// - Exact staleness.
-//
-// If the Cloud Spanner database to be read is geographically distributed,
-// stale read-only transactions can execute more quickly than strong
-// or read-write transactions, because they are able to execute far
-// from the leader replica.
-//
-// Each type of timestamp bound is discussed in detail below.
-//
-// Strong: Strong reads are guaranteed to see the effects of all transactions
-// that have committed before the start of the read. Furthermore, all
-// rows yielded by a single read are consistent with each other -- if
-// any part of the read observes a transaction, all parts of the read
-// see the transaction.
-//
-// Strong reads are not repeatable: two consecutive strong read-only
-// transactions might return inconsistent results if there are
-// concurrent writes. If consistency across reads is required, the
-// reads should be executed within a transaction or at an exact read
-// timestamp.
-//
-// Queries on change streams (see below for more details) must also specify
-// the strong read timestamp bound.
-//
-// See
-// [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong].
-//
-// Exact staleness:
-//
-// These timestamp bounds execute reads at a user-specified
-// timestamp. Reads at a timestamp are guaranteed to see a consistent
-// prefix of the global transaction history: they observe
-// modifications done by all transactions with a commit timestamp less than or
-// equal to the read timestamp, and observe none of the modifications done by
-// transactions with a larger commit timestamp. They will block until
-// all conflicting transactions that may be assigned commit timestamps
-// <= the read timestamp have finished.
-//
-// The timestamp can either be expressed as an absolute Cloud Spanner commit
-// timestamp or a staleness relative to the current time.
-//
-// These modes do not require a "negotiation phase" to pick a
-// timestamp. As a result, they execute slightly faster than the
-// equivalent boundedly stale concurrency modes. On the other hand,
-// boundedly stale reads usually return fresher results.
-//
-// See
-// [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp]
-// and
-// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness].
-//
-// Bounded staleness:
-//
-// Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-// subject to a user-provided staleness bound. Cloud Spanner chooses the
-// newest timestamp within the staleness bound that allows execution
-// of the reads at the closest available replica without blocking.
-//
-// All rows yielded are consistent with each other -- if any part of
-// the read observes a transaction, all parts of the read see the
-// transaction. Boundedly stale reads are not repeatable: two stale
-// reads, even if they use the same staleness bound, can execute at
-// different timestamps and thus return inconsistent results.
-//
-// Boundedly stale reads execute in two phases: the first phase
-// negotiates a timestamp among all replicas needed to serve the
-// read. In the second phase, reads are executed at the negotiated
-// timestamp.
-//
-// As a result of the two phase execution, bounded staleness reads are
-// usually a little slower than comparable exact staleness
-// reads. However, they are typically able to return fresher
-// results, and are more likely to execute at the closest replica.
-//
-// Because the timestamp negotiation requires up-front knowledge of
-// which rows will be read, it can only be used with single-use
-// read-only transactions.
-//
-// See
-// [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness]
-// and
-// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp].
-//
-// Old read timestamps and garbage collection:
-//
-// Cloud Spanner continuously garbage collects deleted and overwritten data
-// in the background to reclaim storage space. This process is known
-// as "version GC". By default, version GC reclaims versions after they
-// are one hour old. Because of this, Cloud Spanner cannot perform reads
-// at read timestamps more than one hour in the past. This
-// restriction also applies to in-progress reads and/or SQL queries whose
-// timestamp become too old while executing. Reads and SQL queries with
-// too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-//
-// You can configure and extend the `VERSION_RETENTION_PERIOD` of a
-// database up to a period as long as one week, which allows Cloud Spanner
-// to perform reads up to one week in the past.
-//
-// Querying change Streams:
-//
-// A Change Stream is a schema object that can be configured to watch data
-// changes on the entire database, a set of tables, or a set of columns
-// in a database.
-//
-// When a change stream is created, Spanner automatically defines a
-// corresponding SQL Table-Valued Function (TVF) that can be used to query
-// the change records in the associated change stream using the
-// ExecuteStreamingSql API. The name of the TVF for a change stream is
-// generated from the name of the change stream: READ_<change_stream_name>.
-//
-// All queries on change stream TVFs must be executed using the
-// ExecuteStreamingSql API with a single-use read-only transaction with a
-// strong read-only timestamp_bound. The change stream TVF allows users to
-// specify the start_timestamp and end_timestamp for the time range of
-// interest. All change records within the retention period is accessible
-// using the strong read-only timestamp_bound. All other TransactionOptions
-// are invalid for change stream queries.
-//
-// In addition, if TransactionOptions.read_only.return_read_timestamp is set
-// to true, a special value of 2^63 - 2 will be returned in the
-// [Transaction][google.spanner.v1.Transaction] message that describes the
-// transaction, instead of a valid read timestamp. This special value should be
-// discarded and not used for any subsequent queries.
-//
-// Please see https://cloud.google.com/spanner/docs/change-streams
-// for more details on how to query the change stream TVFs.
-//
-// Partitioned DML transactions:
-//
-// Partitioned DML transactions are used to execute DML statements with a
-// different execution strategy that provides different, and often better,
-// scalability properties for large, table-wide operations than DML in a
-// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-// should prefer using ReadWrite transactions.
-//
-// Partitioned DML partitions the keyspace and runs the DML statement on each
-// partition in separate, internal transactions. These transactions commit
-// automatically when complete, and run independently from one another.
-//
-// To reduce lock contention, this execution strategy only acquires read locks
-// on rows that match the WHERE clause of the statement. Additionally, the
-// smaller per-partition transactions hold locks for less time.
-//
-// That said, Partitioned DML is not a drop-in replacement for standard DML used
-// in ReadWrite transactions.
-//
-// - The DML statement must be fully-partitionable. Specifically, the statement
-// must be expressible as the union of many statements which each access only
-// a single row of the table.
-//
-// - The statement is not applied atomically to all rows of the table. Rather,
-// the statement is applied atomically to partitions of the table, in
-// independent transactions. Secondary index rows are updated atomically
-// with the base table rows.
-//
-// - Partitioned DML does not guarantee exactly-once execution semantics
-// against a partition. The statement will be applied at least once to each
-// partition. It is strongly recommended that the DML statement should be
-// idempotent to avoid unexpected results. For instance, it is potentially
-// dangerous to run a statement such as
-// `UPDATE table SET column = column + 1` as it could be run multiple times
-// against some rows.
-//
-// - The partitions are committed automatically - there is no support for
-// Commit or Rollback. If the call returns an error, or if the client issuing
-// the ExecuteSql call dies, it is possible that some rows had the statement
-// executed on them successfully. It is also possible that statement was
-// never executed against other rows.
-//
-// - Partitioned DML transactions may only contain the execution of a single
-// DML statement via ExecuteSql or ExecuteStreamingSql.
-//
-// - If any error is encountered during the execution of the partitioned DML
-// operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-// value that cannot be stored due to schema constraints), then the
-// operation is stopped at that point and an error is returned. It is
-// possible that at this point, some partitions have been committed (or even
-// committed multiple times), and other partitions have not been run at all.
-//
-// Given the above, Partitioned DML is good fit for large, database-wide,
-// operations that are idempotent, such as deleting old rows from a very large
-// table.
-type TransactionOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The type of transaction.
- //
- // Types that are assignable to Mode:
- //
- // *TransactionOptions_ReadWrite_
- // *TransactionOptions_PartitionedDml_
- // *TransactionOptions_ReadOnly_
- Mode isTransactionOptions_Mode `protobuf_oneof:"mode"`
- // When `exclude_txn_from_change_streams` is set to `true`:
- // - Mutations from this transaction will not be recorded in change streams
- // with DDL option `allow_txn_exclusion=true` that are tracking columns
- // modified by these transactions.
- // - Mutations from this transaction will be recorded in change streams with
- // DDL option `allow_txn_exclusion=false or not set` that are tracking
- // columns modified by these transactions.
- //
- // When `exclude_txn_from_change_streams` is set to `false` or not set,
- // mutations from this transaction will be recorded in all change streams that
- // are tracking columns modified by these transactions.
- // `exclude_txn_from_change_streams` may only be specified for read-write or
- // partitioned-dml transactions, otherwise the API will return an
- // `INVALID_ARGUMENT` error.
- ExcludeTxnFromChangeStreams bool `protobuf:"varint,5,opt,name=exclude_txn_from_change_streams,json=excludeTxnFromChangeStreams,proto3" json:"exclude_txn_from_change_streams,omitempty"`
-}
-
-func (x *TransactionOptions) Reset() {
- *x = TransactionOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TransactionOptions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransactionOptions) ProtoMessage() {}
-
-func (x *TransactionOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransactionOptions.ProtoReflect.Descriptor instead.
-func (*TransactionOptions) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{0}
-}
-
-func (m *TransactionOptions) GetMode() isTransactionOptions_Mode {
- if m != nil {
- return m.Mode
- }
- return nil
-}
-
-func (x *TransactionOptions) GetReadWrite() *TransactionOptions_ReadWrite {
- if x, ok := x.GetMode().(*TransactionOptions_ReadWrite_); ok {
- return x.ReadWrite
- }
- return nil
-}
-
-func (x *TransactionOptions) GetPartitionedDml() *TransactionOptions_PartitionedDml {
- if x, ok := x.GetMode().(*TransactionOptions_PartitionedDml_); ok {
- return x.PartitionedDml
- }
- return nil
-}
-
-func (x *TransactionOptions) GetReadOnly() *TransactionOptions_ReadOnly {
- if x, ok := x.GetMode().(*TransactionOptions_ReadOnly_); ok {
- return x.ReadOnly
- }
- return nil
-}
-
-func (x *TransactionOptions) GetExcludeTxnFromChangeStreams() bool {
- if x != nil {
- return x.ExcludeTxnFromChangeStreams
- }
- return false
-}
-
-type isTransactionOptions_Mode interface {
- isTransactionOptions_Mode()
-}
-
-type TransactionOptions_ReadWrite_ struct {
- // Transaction may write.
- //
- // Authorization to begin a read-write transaction requires
- // `spanner.databases.beginOrRollbackReadWriteTransaction` permission
- // on the `session` resource.
- ReadWrite *TransactionOptions_ReadWrite `protobuf:"bytes,1,opt,name=read_write,json=readWrite,proto3,oneof"`
-}
-
-type TransactionOptions_PartitionedDml_ struct {
- // Partitioned DML transaction.
- //
- // Authorization to begin a Partitioned DML transaction requires
- // `spanner.databases.beginPartitionedDmlTransaction` permission
- // on the `session` resource.
- PartitionedDml *TransactionOptions_PartitionedDml `protobuf:"bytes,3,opt,name=partitioned_dml,json=partitionedDml,proto3,oneof"`
-}
-
-type TransactionOptions_ReadOnly_ struct {
- // Transaction will not write.
- //
- // Authorization to begin a read-only transaction requires
- // `spanner.databases.beginReadOnlyTransaction` permission
- // on the `session` resource.
- ReadOnly *TransactionOptions_ReadOnly `protobuf:"bytes,2,opt,name=read_only,json=readOnly,proto3,oneof"`
-}
-
-func (*TransactionOptions_ReadWrite_) isTransactionOptions_Mode() {}
-
-func (*TransactionOptions_PartitionedDml_) isTransactionOptions_Mode() {}
-
-func (*TransactionOptions_ReadOnly_) isTransactionOptions_Mode() {}
-
-// A transaction.
-type Transaction struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // `id` may be used to identify the transaction in subsequent
- // [Read][google.spanner.v1.Spanner.Read],
- // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql],
- // [Commit][google.spanner.v1.Spanner.Commit], or
- // [Rollback][google.spanner.v1.Spanner.Rollback] calls.
- //
- // Single-use read-only transactions do not have IDs, because
- // single-use transactions do not support multiple requests.
- Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // For snapshot read-only transactions, the read timestamp chosen
- // for the transaction. Not returned by default: see
- // [TransactionOptions.ReadOnly.return_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.return_read_timestamp].
- //
- // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- // Example: `"2014-10-02T15:01:23.045123456Z"`.
- ReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=read_timestamp,json=readTimestamp,proto3" json:"read_timestamp,omitempty"`
-}
-
-func (x *Transaction) Reset() {
- *x = Transaction{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Transaction) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Transaction) ProtoMessage() {}
-
-func (x *Transaction) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Transaction.ProtoReflect.Descriptor instead.
-func (*Transaction) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Transaction) GetId() []byte {
- if x != nil {
- return x.Id
- }
- return nil
-}
-
-func (x *Transaction) GetReadTimestamp() *timestamppb.Timestamp {
- if x != nil {
- return x.ReadTimestamp
- }
- return nil
-}
-
-// This message is used to select the transaction in which a
-// [Read][google.spanner.v1.Spanner.Read] or
-// [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] call runs.
-//
-// See [TransactionOptions][google.spanner.v1.TransactionOptions] for more
-// information about transactions.
-type TransactionSelector struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // If no fields are set, the default is a single use transaction
- // with strong concurrency.
- //
- // Types that are assignable to Selector:
- //
- // *TransactionSelector_SingleUse
- // *TransactionSelector_Id
- // *TransactionSelector_Begin
- Selector isTransactionSelector_Selector `protobuf_oneof:"selector"`
-}
-
-func (x *TransactionSelector) Reset() {
- *x = TransactionSelector{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TransactionSelector) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransactionSelector) ProtoMessage() {}
-
-func (x *TransactionSelector) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransactionSelector.ProtoReflect.Descriptor instead.
-func (*TransactionSelector) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{2}
-}
-
-func (m *TransactionSelector) GetSelector() isTransactionSelector_Selector {
- if m != nil {
- return m.Selector
- }
- return nil
-}
-
-func (x *TransactionSelector) GetSingleUse() *TransactionOptions {
- if x, ok := x.GetSelector().(*TransactionSelector_SingleUse); ok {
- return x.SingleUse
- }
- return nil
-}
-
-func (x *TransactionSelector) GetId() []byte {
- if x, ok := x.GetSelector().(*TransactionSelector_Id); ok {
- return x.Id
- }
- return nil
-}
-
-func (x *TransactionSelector) GetBegin() *TransactionOptions {
- if x, ok := x.GetSelector().(*TransactionSelector_Begin); ok {
- return x.Begin
- }
- return nil
-}
-
-type isTransactionSelector_Selector interface {
- isTransactionSelector_Selector()
-}
-
-type TransactionSelector_SingleUse struct {
- // Execute the read or SQL query in a temporary transaction.
- // This is the most efficient way to execute a transaction that
- // consists of a single SQL query.
- SingleUse *TransactionOptions `protobuf:"bytes,1,opt,name=single_use,json=singleUse,proto3,oneof"`
-}
-
-type TransactionSelector_Id struct {
- // Execute the read or SQL query in a previously-started transaction.
- Id []byte `protobuf:"bytes,2,opt,name=id,proto3,oneof"`
-}
-
-type TransactionSelector_Begin struct {
- // Begin a new transaction and execute this read or SQL query in
- // it. The transaction ID of the new transaction is returned in
- // [ResultSetMetadata.transaction][google.spanner.v1.ResultSetMetadata.transaction],
- // which is a [Transaction][google.spanner.v1.Transaction].
- Begin *TransactionOptions `protobuf:"bytes,3,opt,name=begin,proto3,oneof"`
-}
-
-func (*TransactionSelector_SingleUse) isTransactionSelector_Selector() {}
-
-func (*TransactionSelector_Id) isTransactionSelector_Selector() {}
-
-func (*TransactionSelector_Begin) isTransactionSelector_Selector() {}
-
-// Message type to initiate a read-write transaction. Currently this
-// transaction type has no options.
-type TransactionOptions_ReadWrite struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Read lock mode for the transaction.
- ReadLockMode TransactionOptions_ReadWrite_ReadLockMode `protobuf:"varint,1,opt,name=read_lock_mode,json=readLockMode,proto3,enum=google.spanner.v1.TransactionOptions_ReadWrite_ReadLockMode" json:"read_lock_mode,omitempty"`
-}
-
-func (x *TransactionOptions_ReadWrite) Reset() {
- *x = TransactionOptions_ReadWrite{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TransactionOptions_ReadWrite) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransactionOptions_ReadWrite) ProtoMessage() {}
-
-func (x *TransactionOptions_ReadWrite) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransactionOptions_ReadWrite.ProtoReflect.Descriptor instead.
-func (*TransactionOptions_ReadWrite) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *TransactionOptions_ReadWrite) GetReadLockMode() TransactionOptions_ReadWrite_ReadLockMode {
- if x != nil {
- return x.ReadLockMode
- }
- return TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED
-}
-
-// Message type to initiate a Partitioned DML transaction.
-type TransactionOptions_PartitionedDml struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *TransactionOptions_PartitionedDml) Reset() {
- *x = TransactionOptions_PartitionedDml{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TransactionOptions_PartitionedDml) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransactionOptions_PartitionedDml) ProtoMessage() {}
-
-func (x *TransactionOptions_PartitionedDml) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransactionOptions_PartitionedDml.ProtoReflect.Descriptor instead.
-func (*TransactionOptions_PartitionedDml) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{0, 1}
-}
-
-// Message type to initiate a read-only transaction.
-type TransactionOptions_ReadOnly struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // How to choose the timestamp for the read-only transaction.
- //
- // Types that are assignable to TimestampBound:
- //
- // *TransactionOptions_ReadOnly_Strong
- // *TransactionOptions_ReadOnly_MinReadTimestamp
- // *TransactionOptions_ReadOnly_MaxStaleness
- // *TransactionOptions_ReadOnly_ReadTimestamp
- // *TransactionOptions_ReadOnly_ExactStaleness
- TimestampBound isTransactionOptions_ReadOnly_TimestampBound `protobuf_oneof:"timestamp_bound"`
- // If true, the Cloud Spanner-selected read timestamp is included in
- // the [Transaction][google.spanner.v1.Transaction] message that describes
- // the transaction.
- ReturnReadTimestamp bool `protobuf:"varint,6,opt,name=return_read_timestamp,json=returnReadTimestamp,proto3" json:"return_read_timestamp,omitempty"`
-}
-
-func (x *TransactionOptions_ReadOnly) Reset() {
- *x = TransactionOptions_ReadOnly{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *TransactionOptions_ReadOnly) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*TransactionOptions_ReadOnly) ProtoMessage() {}
-
-func (x *TransactionOptions_ReadOnly) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_transaction_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use TransactionOptions_ReadOnly.ProtoReflect.Descriptor instead.
-func (*TransactionOptions_ReadOnly) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_transaction_proto_rawDescGZIP(), []int{0, 2}
-}
-
-func (m *TransactionOptions_ReadOnly) GetTimestampBound() isTransactionOptions_ReadOnly_TimestampBound {
- if m != nil {
- return m.TimestampBound
- }
- return nil
-}
-
-func (x *TransactionOptions_ReadOnly) GetStrong() bool {
- if x, ok := x.GetTimestampBound().(*TransactionOptions_ReadOnly_Strong); ok {
- return x.Strong
- }
- return false
-}
-
-func (x *TransactionOptions_ReadOnly) GetMinReadTimestamp() *timestamppb.Timestamp {
- if x, ok := x.GetTimestampBound().(*TransactionOptions_ReadOnly_MinReadTimestamp); ok {
- return x.MinReadTimestamp
- }
- return nil
-}
-
-func (x *TransactionOptions_ReadOnly) GetMaxStaleness() *durationpb.Duration {
- if x, ok := x.GetTimestampBound().(*TransactionOptions_ReadOnly_MaxStaleness); ok {
- return x.MaxStaleness
- }
- return nil
-}
-
-func (x *TransactionOptions_ReadOnly) GetReadTimestamp() *timestamppb.Timestamp {
- if x, ok := x.GetTimestampBound().(*TransactionOptions_ReadOnly_ReadTimestamp); ok {
- return x.ReadTimestamp
- }
- return nil
-}
-
-func (x *TransactionOptions_ReadOnly) GetExactStaleness() *durationpb.Duration {
- if x, ok := x.GetTimestampBound().(*TransactionOptions_ReadOnly_ExactStaleness); ok {
- return x.ExactStaleness
- }
- return nil
-}
-
-func (x *TransactionOptions_ReadOnly) GetReturnReadTimestamp() bool {
- if x != nil {
- return x.ReturnReadTimestamp
- }
- return false
-}
-
-type isTransactionOptions_ReadOnly_TimestampBound interface {
- isTransactionOptions_ReadOnly_TimestampBound()
-}
-
-type TransactionOptions_ReadOnly_Strong struct {
- // Read at a timestamp where all previously committed transactions
- // are visible.
- Strong bool `protobuf:"varint,1,opt,name=strong,proto3,oneof"`
-}
-
-type TransactionOptions_ReadOnly_MinReadTimestamp struct {
- // Executes all reads at a timestamp >= `min_read_timestamp`.
- //
- // This is useful for requesting fresher data than some previous
- // read, or data that is fresh enough to observe the effects of some
- // previously committed transaction whose timestamp is known.
- //
- // Note that this option can only be used in single-use transactions.
- //
- // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- // Example: `"2014-10-02T15:01:23.045123456Z"`.
- MinReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=min_read_timestamp,json=minReadTimestamp,proto3,oneof"`
-}
-
-type TransactionOptions_ReadOnly_MaxStaleness struct {
- // Read data at a timestamp >= `NOW - max_staleness`
- // seconds. Guarantees that all writes that have committed more
- // than the specified number of seconds ago are visible. Because
- // Cloud Spanner chooses the exact timestamp, this mode works even if
- // the client's local clock is substantially skewed from Cloud Spanner
- // commit timestamps.
- //
- // Useful for reading the freshest data available at a nearby
- // replica, while bounding the possible staleness if the local
- // replica has fallen behind.
- //
- // Note that this option can only be used in single-use
- // transactions.
- MaxStaleness *durationpb.Duration `protobuf:"bytes,3,opt,name=max_staleness,json=maxStaleness,proto3,oneof"`
-}
-
-type TransactionOptions_ReadOnly_ReadTimestamp struct {
- // Executes all reads at the given timestamp. Unlike other modes,
- // reads at a specific timestamp are repeatable; the same read at
- // the same timestamp always returns the same data. If the
- // timestamp is in the future, the read will block until the
- // specified timestamp, modulo the read's deadline.
- //
- // Useful for large scale consistent reads such as mapreduces, or
- // for coordinating many reads against a consistent snapshot of the
- // data.
- //
- // A timestamp in RFC3339 UTC \"Zulu\" format, accurate to nanoseconds.
- // Example: `"2014-10-02T15:01:23.045123456Z"`.
- ReadTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=read_timestamp,json=readTimestamp,proto3,oneof"`
-}
-
-type TransactionOptions_ReadOnly_ExactStaleness struct {
- // Executes all reads at a timestamp that is `exact_staleness`
- // old. The timestamp is chosen soon after the read is started.
- //
- // Guarantees that all writes that have committed more than the
- // specified number of seconds ago are visible. Because Cloud Spanner
- // chooses the exact timestamp, this mode works even if the client's
- // local clock is substantially skewed from Cloud Spanner commit
- // timestamps.
- //
- // Useful for reading at nearby replicas without the distributed
- // timestamp negotiation overhead of `max_staleness`.
- ExactStaleness *durationpb.Duration `protobuf:"bytes,5,opt,name=exact_staleness,json=exactStaleness,proto3,oneof"`
-}
-
-func (*TransactionOptions_ReadOnly_Strong) isTransactionOptions_ReadOnly_TimestampBound() {}
-
-func (*TransactionOptions_ReadOnly_MinReadTimestamp) isTransactionOptions_ReadOnly_TimestampBound() {}
-
-func (*TransactionOptions_ReadOnly_MaxStaleness) isTransactionOptions_ReadOnly_TimestampBound() {}
-
-func (*TransactionOptions_ReadOnly_ReadTimestamp) isTransactionOptions_ReadOnly_TimestampBound() {}
-
-func (*TransactionOptions_ReadOnly_ExactStaleness) isTransactionOptions_ReadOnly_TimestampBound() {}
-
-var File_google_spanner_v1_transaction_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_transaction_proto_rawDesc = []byte{
- 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
- 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x07, 0x0a, 0x12, 0x54, 0x72,
- 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x50, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70,
- 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64,
- 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x00, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69,
- 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x65,
- 0x64, 0x5f, 0x64, 0x6d, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
- 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x6d,
- 0x6c, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x65, 0x64,
- 0x44, 0x6d, 0x6c, 0x12, 0x4d, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65,
- 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e,
- 0x6c, 0x79, 0x12, 0x44, 0x0a, 0x1f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x78,
- 0x6e, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x74,
- 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x78, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x54, 0x78, 0x6e, 0x46, 0x72, 0x6f, 0x6d, 0x43, 0x68, 0x61, 0x6e, 0x67,
- 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x1a, 0xc0, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x61,
- 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x62, 0x0a, 0x0e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c,
- 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e,
- 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x57, 0x72, 0x69, 0x74, 0x65, 0x2e,
- 0x52, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0c, 0x72, 0x65,
- 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x4f, 0x0a, 0x0c, 0x52, 0x65,
- 0x61, 0x64, 0x4c, 0x6f, 0x63, 0x6b, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x52, 0x45,
- 0x41, 0x44, 0x5f, 0x4c, 0x4f, 0x43, 0x4b, 0x5f, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53,
- 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x45,
- 0x53, 0x53, 0x49, 0x4d, 0x49, 0x53, 0x54, 0x49, 0x43, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x4f,
- 0x50, 0x54, 0x49, 0x4d, 0x49, 0x53, 0x54, 0x49, 0x43, 0x10, 0x02, 0x1a, 0x10, 0x0a, 0x0e, 0x50,
- 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x6d, 0x6c, 0x1a, 0x84, 0x03,
- 0x0a, 0x08, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x74,
- 0x72, 0x6f, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74,
- 0x72, 0x6f, 0x6e, 0x67, 0x12, 0x4a, 0x0a, 0x12, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x10,
- 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x12, 0x40, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x6e, 0x65, 0x73,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x6e, 0x65,
- 0x73, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x44, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74,
- 0x5f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65,
- 0x78, 0x61, 0x63, 0x74, 0x53, 0x74, 0x61, 0x6c, 0x65, 0x6e, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a,
- 0x15, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x72, 0x65,
- 0x74, 0x75, 0x72, 0x6e, 0x52, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
- 0x70, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x62,
- 0x6f, 0x75, 0x6e, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0x60, 0x0a, 0x0b,
- 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x41, 0x0a, 0x0e, 0x72,
- 0x65, 0x61, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
- 0x0d, 0x72, 0x65, 0x61, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xba,
- 0x01, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65,
- 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x46, 0x0a, 0x0a, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65,
- 0x5f, 0x75, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54,
- 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x48, 0x00, 0x52, 0x09, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x55, 0x73, 0x65, 0x12, 0x10,
- 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x02, 0x69, 0x64,
- 0x12, 0x3d, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x48, 0x00, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x42,
- 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0xb3, 0x01, 0x0a, 0x15,
- 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73, 0x70, 0x61,
- 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62,
- 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
- 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x17, 0x47, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65,
- 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43,
- 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a, 0x3a, 0x56,
- 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_transaction_proto_rawDescOnce sync.Once
- file_google_spanner_v1_transaction_proto_rawDescData = file_google_spanner_v1_transaction_proto_rawDesc
-)
-
-func file_google_spanner_v1_transaction_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_transaction_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_transaction_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_transaction_proto_rawDescData)
- })
- return file_google_spanner_v1_transaction_proto_rawDescData
-}
-
-var file_google_spanner_v1_transaction_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_google_spanner_v1_transaction_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
-var file_google_spanner_v1_transaction_proto_goTypes = []any{
- (TransactionOptions_ReadWrite_ReadLockMode)(0), // 0: google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode
- (*TransactionOptions)(nil), // 1: google.spanner.v1.TransactionOptions
- (*Transaction)(nil), // 2: google.spanner.v1.Transaction
- (*TransactionSelector)(nil), // 3: google.spanner.v1.TransactionSelector
- (*TransactionOptions_ReadWrite)(nil), // 4: google.spanner.v1.TransactionOptions.ReadWrite
- (*TransactionOptions_PartitionedDml)(nil), // 5: google.spanner.v1.TransactionOptions.PartitionedDml
- (*TransactionOptions_ReadOnly)(nil), // 6: google.spanner.v1.TransactionOptions.ReadOnly
- (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
- (*durationpb.Duration)(nil), // 8: google.protobuf.Duration
-}
-var file_google_spanner_v1_transaction_proto_depIdxs = []int32{
- 4, // 0: google.spanner.v1.TransactionOptions.read_write:type_name -> google.spanner.v1.TransactionOptions.ReadWrite
- 5, // 1: google.spanner.v1.TransactionOptions.partitioned_dml:type_name -> google.spanner.v1.TransactionOptions.PartitionedDml
- 6, // 2: google.spanner.v1.TransactionOptions.read_only:type_name -> google.spanner.v1.TransactionOptions.ReadOnly
- 7, // 3: google.spanner.v1.Transaction.read_timestamp:type_name -> google.protobuf.Timestamp
- 1, // 4: google.spanner.v1.TransactionSelector.single_use:type_name -> google.spanner.v1.TransactionOptions
- 1, // 5: google.spanner.v1.TransactionSelector.begin:type_name -> google.spanner.v1.TransactionOptions
- 0, // 6: google.spanner.v1.TransactionOptions.ReadWrite.read_lock_mode:type_name -> google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode
- 7, // 7: google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp:type_name -> google.protobuf.Timestamp
- 8, // 8: google.spanner.v1.TransactionOptions.ReadOnly.max_staleness:type_name -> google.protobuf.Duration
- 7, // 9: google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp:type_name -> google.protobuf.Timestamp
- 8, // 10: google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness:type_name -> google.protobuf.Duration
- 11, // [11:11] is the sub-list for method output_type
- 11, // [11:11] is the sub-list for method input_type
- 11, // [11:11] is the sub-list for extension type_name
- 11, // [11:11] is the sub-list for extension extendee
- 0, // [0:11] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_transaction_proto_init() }
-func file_google_spanner_v1_transaction_proto_init() {
- if File_google_spanner_v1_transaction_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_transaction_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*TransactionOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_transaction_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Transaction); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_transaction_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*TransactionSelector); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_transaction_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*TransactionOptions_ReadWrite); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_transaction_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*TransactionOptions_PartitionedDml); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_transaction_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*TransactionOptions_ReadOnly); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_spanner_v1_transaction_proto_msgTypes[0].OneofWrappers = []any{
- (*TransactionOptions_ReadWrite_)(nil),
- (*TransactionOptions_PartitionedDml_)(nil),
- (*TransactionOptions_ReadOnly_)(nil),
- }
- file_google_spanner_v1_transaction_proto_msgTypes[2].OneofWrappers = []any{
- (*TransactionSelector_SingleUse)(nil),
- (*TransactionSelector_Id)(nil),
- (*TransactionSelector_Begin)(nil),
- }
- file_google_spanner_v1_transaction_proto_msgTypes[5].OneofWrappers = []any{
- (*TransactionOptions_ReadOnly_Strong)(nil),
- (*TransactionOptions_ReadOnly_MinReadTimestamp)(nil),
- (*TransactionOptions_ReadOnly_MaxStaleness)(nil),
- (*TransactionOptions_ReadOnly_ReadTimestamp)(nil),
- (*TransactionOptions_ReadOnly_ExactStaleness)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_transaction_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 6,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_transaction_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_transaction_proto_depIdxs,
- EnumInfos: file_google_spanner_v1_transaction_proto_enumTypes,
- MessageInfos: file_google_spanner_v1_transaction_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_transaction_proto = out.File
- file_google_spanner_v1_transaction_proto_rawDesc = nil
- file_google_spanner_v1_transaction_proto_goTypes = nil
- file_google_spanner_v1_transaction_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/type.pb.go b/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/type.pb.go
deleted file mode 100644
index 0c30a4f54..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/spannerpb/type.pb.go
+++ /dev/null
@@ -1,630 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.2
-// protoc v4.25.3
-// source: google/spanner/v1/type.proto
-
-package spannerpb
-
-import (
- reflect "reflect"
- sync "sync"
-
- _ "google.golang.org/genproto/googleapis/api/annotations"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// `TypeCode` is used as part of [Type][google.spanner.v1.Type] to
-// indicate the type of a Cloud Spanner value.
-//
-// Each legal value of a type can be encoded to or decoded from a JSON
-// value, using the encodings described below. All Cloud Spanner values can
-// be `null`, regardless of type; `null`s are always encoded as a JSON
-// `null`.
-type TypeCode int32
-
-const (
- // Not specified.
- TypeCode_TYPE_CODE_UNSPECIFIED TypeCode = 0
- // Encoded as JSON `true` or `false`.
- TypeCode_BOOL TypeCode = 1
- // Encoded as `string`, in decimal format.
- TypeCode_INT64 TypeCode = 2
- // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
- // `"-Infinity"`.
- TypeCode_FLOAT64 TypeCode = 3
- // Encoded as `number`, or the strings `"NaN"`, `"Infinity"`, or
- // `"-Infinity"`.
- TypeCode_FLOAT32 TypeCode = 15
- // Encoded as `string` in RFC 3339 timestamp format. The time zone
- // must be present, and must be `"Z"`.
- //
- // If the schema has the column option
- // `allow_commit_timestamp=true`, the placeholder string
- // `"spanner.commit_timestamp()"` can be used to instruct the system
- // to insert the commit timestamp associated with the transaction
- // commit.
- TypeCode_TIMESTAMP TypeCode = 4
- // Encoded as `string` in RFC 3339 date format.
- TypeCode_DATE TypeCode = 5
- // Encoded as `string`.
- TypeCode_STRING TypeCode = 6
- // Encoded as a base64-encoded `string`, as described in RFC 4648,
- // section 4.
- TypeCode_BYTES TypeCode = 7
- // Encoded as `list`, where the list elements are represented
- // according to
- // [array_element_type][google.spanner.v1.Type.array_element_type].
- TypeCode_ARRAY TypeCode = 8
- // Encoded as `list`, where list element `i` is represented according
- // to [struct_type.fields[i]][google.spanner.v1.StructType.fields].
- TypeCode_STRUCT TypeCode = 9
- // Encoded as `string`, in decimal format or scientific notation format.
- // <br>Decimal format:
- // <br>`[+-]Digits[.[Digits]]` or
- // <br>`[+-][Digits].Digits`
- //
- // Scientific notation:
- // <br>`[+-]Digits[.[Digits]][ExponentIndicator[+-]Digits]` or
- // <br>`[+-][Digits].Digits[ExponentIndicator[+-]Digits]`
- // <br>(ExponentIndicator is `"e"` or `"E"`)
- TypeCode_NUMERIC TypeCode = 10
- // Encoded as a JSON-formatted `string` as described in RFC 7159. The
- // following rules are applied when parsing JSON input:
- //
- // - Whitespace characters are not preserved.
- // - If a JSON object has duplicate keys, only the first key is preserved.
- // - Members of a JSON object are not guaranteed to have their order
- // preserved.
- // - JSON array elements will have their order preserved.
- TypeCode_JSON TypeCode = 11
- // Encoded as a base64-encoded `string`, as described in RFC 4648,
- // section 4.
- TypeCode_PROTO TypeCode = 13
- // Encoded as `string`, in decimal format.
- TypeCode_ENUM TypeCode = 14
-)
-
-// Enum value maps for TypeCode.
-var (
- TypeCode_name = map[int32]string{
- 0: "TYPE_CODE_UNSPECIFIED",
- 1: "BOOL",
- 2: "INT64",
- 3: "FLOAT64",
- 15: "FLOAT32",
- 4: "TIMESTAMP",
- 5: "DATE",
- 6: "STRING",
- 7: "BYTES",
- 8: "ARRAY",
- 9: "STRUCT",
- 10: "NUMERIC",
- 11: "JSON",
- 13: "PROTO",
- 14: "ENUM",
- }
- TypeCode_value = map[string]int32{
- "TYPE_CODE_UNSPECIFIED": 0,
- "BOOL": 1,
- "INT64": 2,
- "FLOAT64": 3,
- "FLOAT32": 15,
- "TIMESTAMP": 4,
- "DATE": 5,
- "STRING": 6,
- "BYTES": 7,
- "ARRAY": 8,
- "STRUCT": 9,
- "NUMERIC": 10,
- "JSON": 11,
- "PROTO": 13,
- "ENUM": 14,
- }
-)
-
-func (x TypeCode) Enum() *TypeCode {
- p := new(TypeCode)
- *p = x
- return p
-}
-
-func (x TypeCode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TypeCode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_type_proto_enumTypes[0].Descriptor()
-}
-
-func (TypeCode) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_type_proto_enumTypes[0]
-}
-
-func (x TypeCode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TypeCode.Descriptor instead.
-func (TypeCode) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_type_proto_rawDescGZIP(), []int{0}
-}
-
-// `TypeAnnotationCode` is used as a part of [Type][google.spanner.v1.Type] to
-// disambiguate SQL types that should be used for a given Cloud Spanner value.
-// Disambiguation is needed because the same Cloud Spanner type can be mapped to
-// different SQL types depending on SQL dialect. TypeAnnotationCode doesn't
-// affect the way value is serialized.
-type TypeAnnotationCode int32
-
-const (
- // Not specified.
- TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED TypeAnnotationCode = 0
- // PostgreSQL compatible NUMERIC type. This annotation needs to be applied to
- // [Type][google.spanner.v1.Type] instances having [NUMERIC][google.spanner.v1.TypeCode.NUMERIC]
- // type code to specify that values of this type should be treated as
- // PostgreSQL NUMERIC values. Currently this annotation is always needed for
- // [NUMERIC][google.spanner.v1.TypeCode.NUMERIC] when a client interacts with PostgreSQL-enabled
- // Spanner databases.
- TypeAnnotationCode_PG_NUMERIC TypeAnnotationCode = 2
- // PostgreSQL compatible JSONB type. This annotation needs to be applied to
- // [Type][google.spanner.v1.Type] instances having [JSON][google.spanner.v1.TypeCode.JSON]
- // type code to specify that values of this type should be treated as
- // PostgreSQL JSONB values. Currently this annotation is always needed for
- // [JSON][google.spanner.v1.TypeCode.JSON] when a client interacts with PostgreSQL-enabled
- // Spanner databases.
- TypeAnnotationCode_PG_JSONB TypeAnnotationCode = 3
- // PostgreSQL compatible OID type. This annotation can be used by a client
- // interacting with PostgreSQL-enabled Spanner database to specify that a
- // value should be treated using the semantics of the OID type.
- TypeAnnotationCode_PG_OID TypeAnnotationCode = 4
-)
-
-// Enum value maps for TypeAnnotationCode.
-var (
- TypeAnnotationCode_name = map[int32]string{
- 0: "TYPE_ANNOTATION_CODE_UNSPECIFIED",
- 2: "PG_NUMERIC",
- 3: "PG_JSONB",
- 4: "PG_OID",
- }
- TypeAnnotationCode_value = map[string]int32{
- "TYPE_ANNOTATION_CODE_UNSPECIFIED": 0,
- "PG_NUMERIC": 2,
- "PG_JSONB": 3,
- "PG_OID": 4,
- }
-)
-
-func (x TypeAnnotationCode) Enum() *TypeAnnotationCode {
- p := new(TypeAnnotationCode)
- *p = x
- return p
-}
-
-func (x TypeAnnotationCode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TypeAnnotationCode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_spanner_v1_type_proto_enumTypes[1].Descriptor()
-}
-
-func (TypeAnnotationCode) Type() protoreflect.EnumType {
- return &file_google_spanner_v1_type_proto_enumTypes[1]
-}
-
-func (x TypeAnnotationCode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TypeAnnotationCode.Descriptor instead.
-func (TypeAnnotationCode) EnumDescriptor() ([]byte, []int) {
- return file_google_spanner_v1_type_proto_rawDescGZIP(), []int{1}
-}
-
-// `Type` indicates the type of a Cloud Spanner value, as might be stored in a
-// table cell or returned from an SQL query.
-type Type struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The [TypeCode][google.spanner.v1.TypeCode] for this type.
- Code TypeCode `protobuf:"varint,1,opt,name=code,proto3,enum=google.spanner.v1.TypeCode" json:"code,omitempty"`
- // If [code][google.spanner.v1.Type.code] == [ARRAY][google.spanner.v1.TypeCode.ARRAY], then `array_element_type`
- // is the type of the array elements.
- ArrayElementType *Type `protobuf:"bytes,2,opt,name=array_element_type,json=arrayElementType,proto3" json:"array_element_type,omitempty"`
- // If [code][google.spanner.v1.Type.code] == [STRUCT][google.spanner.v1.TypeCode.STRUCT], then `struct_type`
- // provides type information for the struct's fields.
- StructType *StructType `protobuf:"bytes,3,opt,name=struct_type,json=structType,proto3" json:"struct_type,omitempty"`
- // The [TypeAnnotationCode][google.spanner.v1.TypeAnnotationCode] that disambiguates SQL type that Spanner will
- // use to represent values of this type during query processing. This is
- // necessary for some type codes because a single [TypeCode][google.spanner.v1.TypeCode] can be mapped
- // to different SQL types depending on the SQL dialect. [type_annotation][google.spanner.v1.Type.type_annotation]
- // typically is not needed to process the content of a value (it doesn't
- // affect serialization) and clients can ignore it on the read path.
- TypeAnnotation TypeAnnotationCode `protobuf:"varint,4,opt,name=type_annotation,json=typeAnnotation,proto3,enum=google.spanner.v1.TypeAnnotationCode" json:"type_annotation,omitempty"`
- // If [code][google.spanner.v1.Type.code] ==
- // [PROTO][google.spanner.v1.TypeCode.PROTO] or
- // [code][google.spanner.v1.Type.code] ==
- // [ENUM][google.spanner.v1.TypeCode.ENUM], then `proto_type_fqn` is the fully
- // qualified name of the proto type representing the proto/enum definition.
- ProtoTypeFqn string `protobuf:"bytes,5,opt,name=proto_type_fqn,json=protoTypeFqn,proto3" json:"proto_type_fqn,omitempty"`
-}
-
-func (x *Type) Reset() {
- *x = Type{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_type_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Type) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Type) ProtoMessage() {}
-
-func (x *Type) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_type_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Type.ProtoReflect.Descriptor instead.
-func (*Type) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_type_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Type) GetCode() TypeCode {
- if x != nil {
- return x.Code
- }
- return TypeCode_TYPE_CODE_UNSPECIFIED
-}
-
-func (x *Type) GetArrayElementType() *Type {
- if x != nil {
- return x.ArrayElementType
- }
- return nil
-}
-
-func (x *Type) GetStructType() *StructType {
- if x != nil {
- return x.StructType
- }
- return nil
-}
-
-func (x *Type) GetTypeAnnotation() TypeAnnotationCode {
- if x != nil {
- return x.TypeAnnotation
- }
- return TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED
-}
-
-func (x *Type) GetProtoTypeFqn() string {
- if x != nil {
- return x.ProtoTypeFqn
- }
- return ""
-}
-
-// `StructType` defines the fields of a [STRUCT][google.spanner.v1.TypeCode.STRUCT] type.
-type StructType struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of fields that make up this struct. Order is
- // significant, because values of this struct type are represented as
- // lists, where the order of field values matches the order of
- // fields in the [StructType][google.spanner.v1.StructType]. In turn, the order of fields
- // matches the order of columns in a read request, or the order of
- // fields in the `SELECT` clause of a query.
- Fields []*StructType_Field `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"`
-}
-
-func (x *StructType) Reset() {
- *x = StructType{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_type_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StructType) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StructType) ProtoMessage() {}
-
-func (x *StructType) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_type_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StructType.ProtoReflect.Descriptor instead.
-func (*StructType) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_type_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *StructType) GetFields() []*StructType_Field {
- if x != nil {
- return x.Fields
- }
- return nil
-}
-
-// Message representing a single field of a struct.
-type StructType_Field struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The name of the field. For reads, this is the column name. For
- // SQL queries, it is the column alias (e.g., `"Word"` in the
- // query `"SELECT 'hello' AS Word"`), or the column name (e.g.,
- // `"ColName"` in the query `"SELECT ColName FROM Table"`). Some
- // columns might have an empty name (e.g., `"SELECT
- // UPPER(ColName)"`). Note that a query result can contain
- // multiple fields with the same name.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // The type of the field.
- Type *Type `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
-}
-
-func (x *StructType_Field) Reset() {
- *x = StructType_Field{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_spanner_v1_type_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StructType_Field) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StructType_Field) ProtoMessage() {}
-
-func (x *StructType_Field) ProtoReflect() protoreflect.Message {
- mi := &file_google_spanner_v1_type_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StructType_Field.ProtoReflect.Descriptor instead.
-func (*StructType_Field) Descriptor() ([]byte, []int) {
- return file_google_spanner_v1_type_proto_rawDescGZIP(), []int{1, 0}
-}
-
-func (x *StructType_Field) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *StructType_Field) GetType() *Type {
- if x != nil {
- return x.Type
- }
- return nil
-}
-
-var File_google_spanner_v1_type_proto protoreflect.FileDescriptor
-
-var file_google_spanner_v1_type_proto_rawDesc = []byte{
- 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x22, 0xb9, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x63,
- 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x79,
- 0x70, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64,
- 0x65, 0x12, 0x45, 0x0a, 0x12, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x65, 0x6c, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x10, 0x61, 0x72, 0x72, 0x61, 0x79, 0x45, 0x6c, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75,
- 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x74,
- 0x72, 0x75, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4e, 0x0a, 0x0f, 0x74, 0x79, 0x70, 0x65,
- 0x5f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e,
- 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x0e, 0x74, 0x79, 0x70, 0x65, 0x41, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x66, 0x71, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x46, 0x71, 0x6e, 0x22, 0x93,
- 0x01, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a,
- 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76,
- 0x31, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x48, 0x0a, 0x05, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x2a, 0xc7, 0x01, 0x0a, 0x08, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x64,
- 0x65, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55,
- 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04,
- 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10,
- 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0b,
- 0x0a, 0x07, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x54,
- 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x41,
- 0x54, 0x45, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x06,
- 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x41,
- 0x52, 0x52, 0x41, 0x59, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x55, 0x43, 0x54,
- 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x55, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x10, 0x0a, 0x12,
- 0x08, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x0b, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x52, 0x4f,
- 0x54, 0x4f, 0x10, 0x0d, 0x12, 0x08, 0x0a, 0x04, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x2a, 0x64,
- 0x0a, 0x12, 0x54, 0x79, 0x70, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x20, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x4e,
- 0x4f, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53,
- 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x50, 0x47,
- 0x5f, 0x4e, 0x55, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x47,
- 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x42, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x47, 0x5f, 0x4f,
- 0x49, 0x44, 0x10, 0x04, 0x42, 0xac, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x42, 0x09,
- 0x54, 0x79, 0x70, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
- 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x73,
- 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x70, 0x62, 0x3b, 0x73, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72,
- 0x70, 0x62, 0xaa, 0x02, 0x17, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75,
- 0x64, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x17, 0x47,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x53, 0x70, 0x61, 0x6e,
- 0x6e, 0x65, 0x72, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
- 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x53, 0x70, 0x61, 0x6e, 0x6e, 0x65, 0x72, 0x3a,
- 0x3a, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_google_spanner_v1_type_proto_rawDescOnce sync.Once
- file_google_spanner_v1_type_proto_rawDescData = file_google_spanner_v1_type_proto_rawDesc
-)
-
-func file_google_spanner_v1_type_proto_rawDescGZIP() []byte {
- file_google_spanner_v1_type_proto_rawDescOnce.Do(func() {
- file_google_spanner_v1_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_spanner_v1_type_proto_rawDescData)
- })
- return file_google_spanner_v1_type_proto_rawDescData
-}
-
-var file_google_spanner_v1_type_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_spanner_v1_type_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
-var file_google_spanner_v1_type_proto_goTypes = []any{
- (TypeCode)(0), // 0: google.spanner.v1.TypeCode
- (TypeAnnotationCode)(0), // 1: google.spanner.v1.TypeAnnotationCode
- (*Type)(nil), // 2: google.spanner.v1.Type
- (*StructType)(nil), // 3: google.spanner.v1.StructType
- (*StructType_Field)(nil), // 4: google.spanner.v1.StructType.Field
-}
-var file_google_spanner_v1_type_proto_depIdxs = []int32{
- 0, // 0: google.spanner.v1.Type.code:type_name -> google.spanner.v1.TypeCode
- 2, // 1: google.spanner.v1.Type.array_element_type:type_name -> google.spanner.v1.Type
- 3, // 2: google.spanner.v1.Type.struct_type:type_name -> google.spanner.v1.StructType
- 1, // 3: google.spanner.v1.Type.type_annotation:type_name -> google.spanner.v1.TypeAnnotationCode
- 4, // 4: google.spanner.v1.StructType.fields:type_name -> google.spanner.v1.StructType.Field
- 2, // 5: google.spanner.v1.StructType.Field.type:type_name -> google.spanner.v1.Type
- 6, // [6:6] is the sub-list for method output_type
- 6, // [6:6] is the sub-list for method input_type
- 6, // [6:6] is the sub-list for extension type_name
- 6, // [6:6] is the sub-list for extension extendee
- 0, // [0:6] is the sub-list for field type_name
-}
-
-func init() { file_google_spanner_v1_type_proto_init() }
-func file_google_spanner_v1_type_proto_init() {
- if File_google_spanner_v1_type_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_google_spanner_v1_type_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Type); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_type_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*StructType); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_spanner_v1_type_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*StructType_Field); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_spanner_v1_type_proto_rawDesc,
- NumEnums: 2,
- NumMessages: 3,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_google_spanner_v1_type_proto_goTypes,
- DependencyIndexes: file_google_spanner_v1_type_proto_depIdxs,
- EnumInfos: file_google_spanner_v1_type_proto_enumTypes,
- MessageInfos: file_google_spanner_v1_type_proto_msgTypes,
- }.Build()
- File_google_spanner_v1_type_proto = out.File
- file_google_spanner_v1_type_proto_rawDesc = nil
- file_google_spanner_v1_type_proto_goTypes = nil
- file_google_spanner_v1_type_proto_depIdxs = nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/apiv1/version.go b/vendor/cloud.google.com/go/spanner/apiv1/version.go
deleted file mode 100644
index 483fd85b2..000000000
--- a/vendor/cloud.google.com/go/spanner/apiv1/version.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by gapicgen. DO NOT EDIT.
-
-package spanner
-
-import "cloud.google.com/go/spanner/internal"
-
-func init() {
- versionClient = internal.Version
-}
diff --git a/vendor/cloud.google.com/go/spanner/batch.go b/vendor/cloud.google.com/go/spanner/batch.go
deleted file mode 100644
index 5c20f66fb..000000000
--- a/vendor/cloud.google.com/go/spanner/batch.go
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
-Copyright 2018 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "bytes"
- "context"
- "encoding/gob"
- "log"
- "time"
-
- "cloud.google.com/go/internal/trace"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "github.com/googleapis/gax-go/v2"
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
- "google.golang.org/protobuf/proto"
-)
-
-// BatchReadOnlyTransaction is a ReadOnlyTransaction that allows for exporting
-// arbitrarily large amounts of data from Cloud Spanner databases.
-// BatchReadOnlyTransaction partitions a read/query request. Read/query request
-// can then be executed independently over each partition while observing the
-// same snapshot of the database. BatchReadOnlyTransaction can also be shared
-// across multiple clients by passing around the BatchReadOnlyTransactionID and
-// then recreating the transaction using Client.BatchReadOnlyTransactionFromID.
-//
-// Note: if a client is used only to run partitions, you can
-// create it using a ClientConfig with both MinOpened and MaxIdle set to
-// zero to avoid creating unnecessary sessions. You can also avoid excess
-// gRPC channels by setting ClientConfig.NumChannels to the number of
-// concurrently active BatchReadOnlyTransactions you expect to have.
-type BatchReadOnlyTransaction struct {
- ReadOnlyTransaction
- ID BatchReadOnlyTransactionID
-}
-
-// BatchReadOnlyTransactionID is a unique identifier for a
-// BatchReadOnlyTransaction. It can be used to re-create a
-// BatchReadOnlyTransaction on a different machine or process by calling
-// Client.BatchReadOnlyTransactionFromID.
-type BatchReadOnlyTransactionID struct {
- // unique ID for the transaction.
- tid transactionID
- // sid is the id of the Cloud Spanner session used for this transaction.
- sid string
- // rts is the read timestamp of this transaction.
- rts time.Time
-}
-
-// Partition defines a segment of data to be read in a batch read or query. A
-// partition can be serialized and processed across several different machines
-// or processes.
-type Partition struct {
- pt []byte
- qreq *sppb.ExecuteSqlRequest
- rreq *sppb.ReadRequest
-}
-
-// PartitionOptions specifies options for a PartitionQueryRequest and
-// PartitionReadRequest. See
-// https://godoc.org/google.golang.org/genproto/googleapis/spanner/v1#PartitionOptions
-// for more details.
-type PartitionOptions struct {
- // The desired data size for each partition generated.
- PartitionBytes int64
- // The desired maximum number of partitions to return.
- MaxPartitions int64
-}
-
-// toProto converts a spanner.PartitionOptions into a sppb.PartitionOptions
-func (opt PartitionOptions) toProto() *sppb.PartitionOptions {
- return &sppb.PartitionOptions{
- PartitionSizeBytes: opt.PartitionBytes,
- MaxPartitions: opt.MaxPartitions,
- }
-}
-
-// PartitionRead returns a list of Partitions that can be used to read rows from
-// the database. These partitions can be executed across multiple processes,
-// even across different machines. The partition size and count hints can be
-// configured using PartitionOptions.
-func (t *BatchReadOnlyTransaction) PartitionRead(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) {
- return t.PartitionReadUsingIndex(ctx, table, "", keys, columns, opt)
-}
-
-// PartitionReadWithOptions returns a list of Partitions that can be used to
-// read rows from the database. These partitions can be executed across multiple
-// processes, even across different machines. The partition size and count hints
-// can be configured using PartitionOptions. Pass a ReadOptions to modify the
-// read operation.
-func (t *BatchReadOnlyTransaction) PartitionReadWithOptions(ctx context.Context, table string, keys KeySet, columns []string, opt PartitionOptions, readOptions ReadOptions) ([]*Partition, error) {
- return t.PartitionReadUsingIndexWithOptions(ctx, table, "", keys, columns, opt, t.ReadOnlyTransaction.txReadOnly.ro.merge(readOptions))
-}
-
-// PartitionReadUsingIndex returns a list of Partitions that can be used to read
-// rows from the database using an index.
-func (t *BatchReadOnlyTransaction) PartitionReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string, opt PartitionOptions) ([]*Partition, error) {
- return t.PartitionReadUsingIndexWithOptions(ctx, table, index, keys, columns, opt, t.ReadOnlyTransaction.txReadOnly.ro)
-}
-
-// PartitionReadUsingIndexWithOptions returns a list of Partitions that can be
-// used to read rows from the database using an index. Pass a ReadOptions to
-// modify the read operation.
-func (t *BatchReadOnlyTransaction) PartitionReadUsingIndexWithOptions(ctx context.Context, table, index string, keys KeySet, columns []string, opt PartitionOptions, readOptions ReadOptions) ([]*Partition, error) {
- sh, ts, err := t.acquire(ctx)
- if err != nil {
- return nil, err
- }
- sid, client := sh.getID(), sh.getClient()
- var (
- kset *sppb.KeySet
- resp *sppb.PartitionResponse
- partitions []*Partition
- )
- kset, err = keys.keySetProto()
- // Request partitions.
- if err != nil {
- return nil, err
- }
- var md metadata.MD
- sh.updateLastUseTime()
- resp, err = client.PartitionRead(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), &sppb.PartitionReadRequest{
- Session: sid,
- Transaction: ts,
- Table: table,
- Index: index,
- Columns: columns,
- KeySet: kset,
- PartitionOptions: opt.toProto(),
- }, gax.WithGRPCOptions(grpc.Header(&md)))
-
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "PartitionReadUsingIndexWithOptions"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "PartitionReadUsingIndexWithOptions", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- // Prepare ReadRequest.
- req := &sppb.ReadRequest{
- Session: sid,
- Transaction: ts,
- Table: table,
- Index: index,
- Columns: columns,
- KeySet: kset,
- RequestOptions: createRequestOptions(readOptions.Priority, readOptions.RequestTag, ""),
- DataBoostEnabled: readOptions.DataBoostEnabled,
- DirectedReadOptions: readOptions.DirectedReadOptions,
- }
- // Generate partitions.
- for _, p := range resp.GetPartitions() {
- partitions = append(partitions, &Partition{
- pt: p.PartitionToken,
- rreq: req,
- })
- }
- return partitions, err
-}
-
-// PartitionQuery returns a list of Partitions that can be used to execute a
-// query against the database.
-func (t *BatchReadOnlyTransaction) PartitionQuery(ctx context.Context, statement Statement, opt PartitionOptions) ([]*Partition, error) {
- return t.partitionQuery(ctx, statement, opt, t.ReadOnlyTransaction.txReadOnly.qo)
-}
-
-// PartitionQueryWithOptions returns a list of Partitions that can be used to
-// execute a query against the database. The sql query execution will be
-// optimized based on the given query options.
-func (t *BatchReadOnlyTransaction) PartitionQueryWithOptions(ctx context.Context, statement Statement, opt PartitionOptions, qOpts QueryOptions) ([]*Partition, error) {
- return t.partitionQuery(ctx, statement, opt, t.ReadOnlyTransaction.txReadOnly.qo.merge(qOpts))
-}
-
-func (t *BatchReadOnlyTransaction) partitionQuery(ctx context.Context, statement Statement, opt PartitionOptions, qOpts QueryOptions) ([]*Partition, error) {
- sh, ts, err := t.acquire(ctx)
- if err != nil {
- return nil, err
- }
- sid, client := sh.getID(), sh.getClient()
- params, paramTypes, err := statement.convertParams()
- if err != nil {
- return nil, err
- }
- var md metadata.MD
-
- // request Partitions
- req := &sppb.PartitionQueryRequest{
- Session: sid,
- Transaction: ts,
- Sql: statement.SQL,
- PartitionOptions: opt.toProto(),
- Params: params,
- ParamTypes: paramTypes,
- }
- sh.updateLastUseTime()
- resp, err := client.PartitionQuery(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), req, gax.WithGRPCOptions(grpc.Header(&md)))
-
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "partitionQuery"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "partitionQuery", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
-
- // prepare ExecuteSqlRequest
- r := &sppb.ExecuteSqlRequest{
- Session: sid,
- Transaction: ts,
- Sql: statement.SQL,
- Params: params,
- ParamTypes: paramTypes,
- QueryOptions: qOpts.Options,
- RequestOptions: createRequestOptions(qOpts.Priority, qOpts.RequestTag, ""),
- DataBoostEnabled: qOpts.DataBoostEnabled,
- DirectedReadOptions: qOpts.DirectedReadOptions,
- }
-
- // generate Partitions
- var partitions []*Partition
- for _, p := range resp.GetPartitions() {
- partitions = append(partitions, &Partition{
- pt: p.PartitionToken,
- qreq: r,
- })
- }
- return partitions, err
-}
-
-// release implements txReadEnv.release, noop.
-func (t *BatchReadOnlyTransaction) release(err error) {
-}
-
-// setTimestamp implements txReadEnv.setTimestamp, noop.
-//
-// read timestamp is ready on txn initialization, avoid contending writing to it
-// with future partitions.
-func (t *BatchReadOnlyTransaction) setTimestamp(ts time.Time) {
-}
-
-// Close marks the txn as closed.
-func (t *BatchReadOnlyTransaction) Close() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.state = txClosed
-}
-
-// Cleanup cleans up all the resources used by this transaction and makes
-// it unusable. Once this method is invoked, the transaction is no longer
-// usable anywhere, including other clients/processes with which this
-// transaction was shared.
-//
-// Calling Cleanup is optional, but recommended. If Cleanup is not called, the
-// transaction's resources will be freed when the session expires on the backend
-// and is deleted. For more information about recycled sessions, see
-// https://cloud.google.com/spanner/docs/sessions.
-func (t *BatchReadOnlyTransaction) Cleanup(ctx context.Context) {
- t.Close()
- t.mu.Lock()
- defer t.mu.Unlock()
- sh := t.sh
- if sh == nil {
- return
- }
- t.sh = nil
- sid, client := sh.getID(), sh.getClient()
-
- var md metadata.MD
- err := client.DeleteSession(contextWithOutgoingMetadata(ctx, sh.getMetadata(), true), &sppb.DeleteSessionRequest{Name: sid}, gax.WithGRPCOptions(grpc.Header(&md)))
-
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "Cleanup"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "Cleanup", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
-
- if err != nil {
- var logger *log.Logger
- if sh.session != nil {
- logger = sh.session.logger
- }
- logf(logger, "Failed to delete session %v. Error: %v", sid, err)
- }
-}
-
-// Execute runs a single Partition obtained from PartitionRead or
-// PartitionQuery.
-func (t *BatchReadOnlyTransaction) Execute(ctx context.Context, p *Partition) *RowIterator {
- var (
- sh *sessionHandle
- err error
- rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error)
- )
- if sh, _, err = t.acquire(ctx); err != nil {
- return &RowIterator{err: err}
- }
- client := sh.getClient()
- if client == nil {
- // Might happen if transaction is closed in the middle of a API call.
- return &RowIterator{err: errSessionClosed(sh)}
- }
- sh.updateLastUseTime()
- // Read or query partition.
- if p.rreq != nil {
- rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
- client, err := client.StreamingRead(ctx, &sppb.ReadRequest{
- Session: p.rreq.Session,
- Transaction: p.rreq.Transaction,
- Table: p.rreq.Table,
- Index: p.rreq.Index,
- Columns: p.rreq.Columns,
- KeySet: p.rreq.KeySet,
- PartitionToken: p.pt,
- RequestOptions: p.rreq.RequestOptions,
- ResumeToken: resumeToken,
- DataBoostEnabled: p.rreq.DataBoostEnabled,
- DirectedReadOptions: p.rreq.DirectedReadOptions,
- })
- if err != nil {
- return client, err
- }
- md, err := client.Header()
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "Execute"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "Execute", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- return client, err
- }
- } else {
- rpc = func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
- client, err := client.ExecuteStreamingSql(ctx, &sppb.ExecuteSqlRequest{
- Session: p.qreq.Session,
- Transaction: p.qreq.Transaction,
- Sql: p.qreq.Sql,
- Params: p.qreq.Params,
- ParamTypes: p.qreq.ParamTypes,
- QueryOptions: p.qreq.QueryOptions,
- PartitionToken: p.pt,
- RequestOptions: p.qreq.RequestOptions,
- ResumeToken: resumeToken,
- DataBoostEnabled: p.qreq.DataBoostEnabled,
- DirectedReadOptions: p.qreq.DirectedReadOptions,
- })
- if err != nil {
- return client, err
- }
- md, err := client.Header()
-
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "Execute"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "Execute", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- return client, err
- }
- }
- return stream(
- contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader),
- sh.session.logger,
- rpc,
- t.setTimestamp,
- t.release)
-}
-
-// MarshalBinary implements BinaryMarshaler.
-func (tid BatchReadOnlyTransactionID) MarshalBinary() (data []byte, err error) {
- var buf bytes.Buffer
- enc := gob.NewEncoder(&buf)
- if err := enc.Encode(tid.tid); err != nil {
- return nil, err
- }
- if err := enc.Encode(tid.sid); err != nil {
- return nil, err
- }
- if err := enc.Encode(tid.rts); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-// UnmarshalBinary implements BinaryUnmarshaler.
-func (tid *BatchReadOnlyTransactionID) UnmarshalBinary(data []byte) error {
- dec := gob.NewDecoder(bytes.NewReader(data))
- if err := dec.Decode(&tid.tid); err != nil {
- return err
- }
- if err := dec.Decode(&tid.sid); err != nil {
- return err
- }
- return dec.Decode(&tid.rts)
-}
-
-// MarshalBinary implements BinaryMarshaler.
-func (p Partition) MarshalBinary() (data []byte, err error) {
- var buf bytes.Buffer
- enc := gob.NewEncoder(&buf)
- if err := enc.Encode(p.pt); err != nil {
- return nil, err
- }
- var isReadPartition bool
- var req proto.Message
- if p.rreq != nil {
- isReadPartition = true
- req = p.rreq
- } else {
- isReadPartition = false
- req = p.qreq
- }
- if err := enc.Encode(isReadPartition); err != nil {
- return nil, err
- }
- if data, err = proto.Marshal(req); err != nil {
- return nil, err
- }
- if err := enc.Encode(data); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-// UnmarshalBinary implements BinaryUnmarshaler.
-func (p *Partition) UnmarshalBinary(data []byte) error {
- var (
- isReadPartition bool
- d []byte
- err error
- )
- dec := gob.NewDecoder(bytes.NewReader(data))
- if err := dec.Decode(&p.pt); err != nil {
- return err
- }
- if err := dec.Decode(&isReadPartition); err != nil {
- return err
- }
- if err := dec.Decode(&d); err != nil {
- return err
- }
- if isReadPartition {
- p.rreq = &sppb.ReadRequest{}
- err = proto.Unmarshal(d, p.rreq)
- } else {
- p.qreq = &sppb.ExecuteSqlRequest{}
- err = proto.Unmarshal(d, p.qreq)
- }
- return err
-}
-
-// GetPartitionToken returns partition token
-func (p *Partition) GetPartitionToken() []byte {
- if p != nil {
- return p.pt
- }
- return nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go
deleted file mode 100644
index 5d3d078a5..000000000
--- a/vendor/cloud.google.com/go/spanner/client.go
+++ /dev/null
@@ -1,1188 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "context"
- "fmt"
- "io"
- "log"
- "os"
- "regexp"
- "strconv"
- "time"
-
- "cloud.google.com/go/internal/trace"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp"
- grpcgcppb "github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp/grpc_gcp"
- "github.com/googleapis/gax-go/v2"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "google.golang.org/api/iterator"
- "google.golang.org/api/option"
- "google.golang.org/api/option/internaloption"
- gtransport "google.golang.org/api/transport/grpc"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials/insecure"
- "google.golang.org/grpc/encoding/gzip"
- "google.golang.org/grpc/metadata"
-
- vkit "cloud.google.com/go/spanner/apiv1"
- "cloud.google.com/go/spanner/internal"
-
- // Install google-c2p resolver, which is required for direct path.
- _ "google.golang.org/grpc/xds/googledirectpath"
- // Install RLS load balancer policy, which is needed for gRPC RLS.
- _ "google.golang.org/grpc/balancer/rls"
-)
-
-const (
- // resourcePrefixHeader is the name of the metadata header used to indicate
- // the resource being operated on.
- resourcePrefixHeader = "google-cloud-resource-prefix"
-
- // routeToLeaderHeader is the name of the metadata header if RW/PDML
- // requests need to route to leader.
- routeToLeaderHeader = "x-goog-spanner-route-to-leader"
-
- requestsCompressionHeader = "x-response-encoding"
-
- // numChannels is the default value for NumChannels of client.
- numChannels = 4
-)
-
-const (
- // Scope is the scope for Cloud Spanner Data API.
- Scope = "https://www.googleapis.com/auth/spanner.data"
-
- // AdminScope is the scope for Cloud Spanner Admin APIs.
- AdminScope = "https://www.googleapis.com/auth/spanner.admin"
-)
-
-var (
- validDBPattern = regexp.MustCompile("^projects/(?P<project>[^/]+)/instances/(?P<instance>[^/]+)/databases/(?P<database>[^/]+)$")
-)
-
-func validDatabaseName(db string) error {
- if matched := validDBPattern.MatchString(db); !matched {
- return fmt.Errorf("database name %q should conform to pattern %q",
- db, validDBPattern.String())
- }
- return nil
-}
-
-func parseDatabaseName(db string) (project, instance, database string, err error) {
- matches := validDBPattern.FindStringSubmatch(db)
- if len(matches) == 0 {
- return "", "", "", fmt.Errorf("Failed to parse database name from %q according to pattern %q",
- db, validDBPattern.String())
- }
- return matches[1], matches[2], matches[3], nil
-}
-
-// Client is a client for reading and writing data to a Cloud Spanner database.
-// A client is safe to use concurrently, except for its Close method.
-type Client struct {
- sc *sessionClient
- idleSessions *sessionPool
- logger *log.Logger
- qo QueryOptions
- ro ReadOptions
- ao []ApplyOption
- txo TransactionOptions
- bwo BatchWriteOptions
- ct *commonTags
- disableRouteToLeader bool
- dro *sppb.DirectedReadOptions
- otConfig *openTelemetryConfig
-}
-
-// DatabaseName returns the full name of a database, e.g.,
-// "projects/spanner-cloud-test/instances/foo/databases/foodb".
-func (c *Client) DatabaseName() string {
- return c.sc.database
-}
-
-// ClientID returns the id of the Client. This is not recommended for customer applications and used internally for testing.
-func (c *Client) ClientID() string {
- return c.sc.id
-}
-
-func createGCPMultiEndpoint(cfg *grpcgcp.GCPMultiEndpointOptions, config ClientConfig, opts ...option.ClientOption) (*grpcgcp.GCPMultiEndpoint, error) {
- if cfg.GRPCgcpConfig == nil {
- cfg.GRPCgcpConfig = &grpcgcppb.ApiConfig{}
- }
- if cfg.GRPCgcpConfig.Method == nil || len(cfg.GRPCgcpConfig.Method) == 0 {
- cfg.GRPCgcpConfig.Method = []*grpcgcppb.MethodConfig{
- {
- Name: []string{"/google.spanner.v1.Spanner/CreateSession"},
- Affinity: &grpcgcppb.AffinityConfig{
- Command: grpcgcppb.AffinityConfig_BIND,
- AffinityKey: "name",
- },
- },
- {
- Name: []string{"/google.spanner.v1.Spanner/BatchCreateSessions"},
- Affinity: &grpcgcppb.AffinityConfig{
- Command: grpcgcppb.AffinityConfig_BIND,
- AffinityKey: "session.name",
- },
- },
- {
- Name: []string{"/google.spanner.v1.Spanner/DeleteSession"},
- Affinity: &grpcgcppb.AffinityConfig{
- Command: grpcgcppb.AffinityConfig_UNBIND,
- AffinityKey: "name",
- },
- },
- {
- Name: []string{"/google.spanner.v1.Spanner/GetSession"},
- Affinity: &grpcgcppb.AffinityConfig{
- Command: grpcgcppb.AffinityConfig_BOUND,
- AffinityKey: "name",
- },
- },
- {
- Name: []string{
- "/google.spanner.v1.Spanner/BeginTransaction",
- "/google.spanner.v1.Spanner/Commit",
- "/google.spanner.v1.Spanner/ExecuteBatchDml",
- "/google.spanner.v1.Spanner/ExecuteSql",
- "/google.spanner.v1.Spanner/ExecuteStreamingSql",
- "/google.spanner.v1.Spanner/PartitionQuery",
- "/google.spanner.v1.Spanner/PartitionRead",
- "/google.spanner.v1.Spanner/Read",
- "/google.spanner.v1.Spanner/Rollback",
- "/google.spanner.v1.Spanner/StreamingRead",
- },
- Affinity: &grpcgcppb.AffinityConfig{
- Command: grpcgcppb.AffinityConfig_BOUND,
- AffinityKey: "session",
- },
- },
- }
- }
- // Append emulator options if SPANNER_EMULATOR_HOST has been set.
- if emulatorAddr := os.Getenv("SPANNER_EMULATOR_HOST"); emulatorAddr != "" {
- emulatorOpts := []option.ClientOption{
- option.WithEndpoint(emulatorAddr),
- option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
- option.WithoutAuthentication(),
- internaloption.SkipDialSettingsValidation(),
- }
- opts = append(opts, emulatorOpts...)
- // Replace all endpoints with emulator target.
- for _, meo := range cfg.MultiEndpoints {
- meo.Endpoints = []string{emulatorAddr}
- }
- }
-
- // Set the number of channels to the default value if not specified.
- if cfg.GRPCgcpConfig.GetChannelPool() == nil || cfg.GRPCgcpConfig.GetChannelPool().GetMaxSize() == 0 {
- cfg.GRPCgcpConfig.ChannelPool = &grpcgcppb.ChannelPoolConfig{
- MinSize: numChannels,
- MaxSize: numChannels,
- }
- }
- // Set MinSize equal to MaxSize to create all the channels beforehand.
- cfg.GRPCgcpConfig.ChannelPool.MinSize = cfg.GRPCgcpConfig.ChannelPool.GetMaxSize()
-
- cfg.GRPCgcpConfig.ChannelPool.BindPickStrategy = grpcgcppb.ChannelPoolConfig_ROUND_ROBIN
-
- cfg.DialFunc = func(ctx context.Context, target string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- copts := opts
-
- for _, do := range dopts {
- copts = append(copts, option.WithGRPCDialOption(do))
- }
-
- allOpts := allClientOpts(1, config.Compression, copts...)
-
- // Overwrite endpoint and pool config.
- allOpts = append(allOpts,
- option.WithEndpoint(target),
- option.WithGRPCConnectionPool(1),
- option.WithGRPCConn(nil),
- )
-
- return gtransport.Dial(ctx, allOpts...)
- }
-
- gme, err := grpcgcp.NewGCPMultiEndpoint(cfg)
- return gme, err
-}
-
-// To use GCPMultiEndpoint in gtransport.Dial (via gtransport.WithConnPool option)
-// we implement gtransport.ConnPool interface using this wrapper.
-type gmeWrapper struct {
- *grpcgcp.GCPMultiEndpoint
-}
-
-// Make sure gmeWrapper implements ConnPool interface.
-var _ gtransport.ConnPool = (*gmeWrapper)(nil)
-
-func (gw *gmeWrapper) Conn() *grpc.ClientConn {
- // GCPMultiEndpoint does not expose any ClientConn.
- // This is safe because Cloud Spanner client doesn't use this function and instead
- // makes calls directly using Invoke and NewStream from the grpc.ClientConnInterface
- // which GCPMultiEndpoint implements.
- return nil
-}
-
-func (gw *gmeWrapper) Num() int {
- return int(gw.GCPMultiEndpoint.GCPConfig().GetChannelPool().GetMaxSize())
-}
-
-// ClientConfig has configurations for the client.
-type ClientConfig struct {
- // NumChannels is the number of gRPC channels.
- // If zero, a reasonable default is used based on the execution environment.
- //
- // Deprecated: The Spanner client now uses a pool of gRPC connections. Use
- // option.WithGRPCConnectionPool(numConns) instead to specify the number of
- // connections the client should use. The client will default to a
- // reasonable default if this option is not specified.
- NumChannels int
-
- // SessionPoolConfig is the configuration for session pool.
- SessionPoolConfig
-
- // SessionLabels for the sessions created by this client.
- // See https://cloud.google.com/spanner/docs/reference/rpc/google.spanner.v1#session
- // for more info.
- SessionLabels map[string]string
-
- // QueryOptions is the configuration for executing a sql query.
- QueryOptions QueryOptions
-
- // ReadOptions is the configuration for reading rows from a database
- ReadOptions ReadOptions
-
- // ApplyOptions is the configuration for applying
- ApplyOptions []ApplyOption
-
- // TransactionOptions is the configuration for a transaction.
- TransactionOptions TransactionOptions
-
- // BatchWriteOptions is the configuration for a BatchWrite request.
- BatchWriteOptions BatchWriteOptions
-
- // CallOptions is the configuration for providing custom retry settings that
- // override the default values.
- CallOptions *vkit.CallOptions
-
- // UserAgent is the prefix to the user agent header. This is used to supply information
- // such as application name or partner tool.
- //
- // Internal Use Only: This field is for internal tracking purpose only,
- // setting the value for this config is not required.
- //
- // Recommended format: ``application-or-tool-ID/major.minor.version``.
- UserAgent string
-
- // DatabaseRole specifies the role to be assumed for all operations on the
- // database by this client.
- DatabaseRole string
-
- // DisableRouteToLeader specifies if all the requests of type read-write and PDML
- // need to be routed to the leader region.
- //
- // Default: false
- DisableRouteToLeader bool
-
- // Logger is the logger to use for this client. If it is nil, all logging
- // will be directed to the standard logger.
- Logger *log.Logger
-
- //
- // Sets the compression to use for all gRPC calls. The compressor must be a valid name.
- // This will enable compression both from the client to the
- // server and from the server to the client.
- //
- // Supported values are:
- // gzip: Enable gzip compression
- // identity: Disable compression
- //
- // Default: identity
- Compression string
-
- // BatchTimeout specifies the timeout for a batch of sessions managed sessionClient.
- BatchTimeout time.Duration
-
- // ClientConfig options used to set the DirectedReadOptions for all ReadRequests
- // and ExecuteSqlRequests for the Client which indicate which replicas or regions
- // should be used for non-transactional reads or queries.
- DirectedReadOptions *sppb.DirectedReadOptions
-
- OpenTelemetryMeterProvider metric.MeterProvider
-}
-
-type openTelemetryConfig struct {
- meterProvider metric.MeterProvider
- attributeMap []attribute.KeyValue
- attributeMapWithMultiplexed []attribute.KeyValue
- attributeMapWithoutMultiplexed []attribute.KeyValue
- otMetricRegistration metric.Registration
- openSessionCount metric.Int64ObservableGauge
- maxAllowedSessionsCount metric.Int64ObservableGauge
- sessionsCount metric.Int64ObservableGauge
- maxInUseSessionsCount metric.Int64ObservableGauge
- getSessionTimeoutsCount metric.Int64Counter
- acquiredSessionsCount metric.Int64Counter
- releasedSessionsCount metric.Int64Counter
- gfeLatency metric.Int64Histogram
- gfeHeaderMissingCount metric.Int64Counter
-}
-
-func contextWithOutgoingMetadata(ctx context.Context, md metadata.MD, disableRouteToLeader bool) context.Context {
- existing, ok := metadata.FromOutgoingContext(ctx)
- if ok {
- md = metadata.Join(existing, md)
- }
- if !disableRouteToLeader {
- md = metadata.Join(md, metadata.Pairs(routeToLeaderHeader, "true"))
- }
- return metadata.NewOutgoingContext(ctx, md)
-}
-
-// NewClient creates a client to a database. A valid database name has the
-// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses
-// a default configuration.
-func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) {
- return NewClientWithConfig(ctx, database, ClientConfig{SessionPoolConfig: DefaultSessionPoolConfig, DisableRouteToLeader: false}, opts...)
-}
-
-// NewClientWithConfig creates a client to a database. A valid database name has
-// the form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
-func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (c *Client, err error) {
- return newClientWithConfig(ctx, database, config, nil, opts...)
-}
-
-func newClientWithConfig(ctx context.Context, database string, config ClientConfig, gme *grpcgcp.GCPMultiEndpoint, opts ...option.ClientOption) (c *Client, err error) {
- // Validate database path.
- if err := validDatabaseName(database); err != nil {
- return nil, err
- }
-
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.NewClient")
- defer func() { trace.EndSpan(ctx, err) }()
-
- // Append emulator options if SPANNER_EMULATOR_HOST has been set.
- if emulatorAddr := os.Getenv("SPANNER_EMULATOR_HOST"); emulatorAddr != "" {
- emulatorOpts := []option.ClientOption{
- option.WithEndpoint(emulatorAddr),
- option.WithGRPCDialOption(grpc.WithTransportCredentials(insecure.NewCredentials())),
- option.WithoutAuthentication(),
- internaloption.SkipDialSettingsValidation(),
- }
- opts = append(emulatorOpts, opts...)
- }
-
- // Prepare gRPC channels.
- hasNumChannelsConfig := config.NumChannels > 0
- if config.NumChannels == 0 {
- config.NumChannels = numChannels
- }
-
- var pool gtransport.ConnPool
-
- if gme != nil {
- // Use GCPMultiEndpoint if provided.
- pool = &gmeWrapper{gme}
- } else {
- // Create gtransport ConnPool as usual if MultiEndpoint is not used.
- // gRPC options.
- allOpts := allClientOpts(config.NumChannels, config.Compression, opts...)
- pool, err = gtransport.DialPool(ctx, allOpts...)
- if err != nil {
- return nil, err
- }
-
- if hasNumChannelsConfig && pool.Num() != config.NumChannels {
- pool.Close()
- return nil, spannerErrorf(codes.InvalidArgument, "Connection pool mismatch: NumChannels=%v, WithGRPCConnectionPool=%v. Only set one of these options, or set both to the same value.", config.NumChannels, pool.Num())
- }
- }
-
- // TODO(loite): Remove as the original map cannot be changed by the user
- // anyways, and the client library is also not changing it.
- // Make a copy of labels.
- sessionLabels := make(map[string]string)
- for k, v := range config.SessionLabels {
- sessionLabels[k] = v
- }
-
- // Default configs for session pool.
- if config.MaxOpened == 0 {
- config.MaxOpened = uint64(pool.Num() * 100)
- }
- if config.MaxBurst == 0 {
- config.MaxBurst = DefaultSessionPoolConfig.MaxBurst
- }
- if config.incStep == 0 {
- config.incStep = DefaultSessionPoolConfig.incStep
- }
- if config.BatchTimeout == 0 {
- config.BatchTimeout = time.Minute
- }
-
- md := metadata.Pairs(resourcePrefixHeader, database)
- if config.Compression == gzip.Name {
- md.Append(requestsCompressionHeader, gzip.Name)
- }
-
- // Create a session client.
- sc := newSessionClient(pool, database, config.UserAgent, sessionLabels, config.DatabaseRole, config.DisableRouteToLeader, md, config.BatchTimeout, config.Logger, config.CallOptions)
-
- // Create a OpenTelemetry configuration
- otConfig, err := createOpenTelemetryConfig(config.OpenTelemetryMeterProvider, config.Logger, sc.id, database)
- if err != nil {
- // The error returned here will be due to database name parsing
- return nil, err
- }
- // To prevent data race in unit tests (ex: TestClient_SessionNotFound)
- sc.mu.Lock()
- sc.otConfig = otConfig
- sc.mu.Unlock()
-
- // Create a session pool.
- config.SessionPoolConfig.sessionLabels = sessionLabels
- sp, err := newSessionPool(sc, config.SessionPoolConfig)
- if err != nil {
- sc.close()
- return nil, err
- }
-
- c = &Client{
- sc: sc,
- idleSessions: sp,
- logger: config.Logger,
- qo: getQueryOptions(config.QueryOptions),
- ro: config.ReadOptions,
- ao: config.ApplyOptions,
- txo: config.TransactionOptions,
- bwo: config.BatchWriteOptions,
- ct: getCommonTags(sc),
- disableRouteToLeader: config.DisableRouteToLeader,
- dro: config.DirectedReadOptions,
- otConfig: otConfig,
- }
- return c, nil
-}
-
-// NewMultiEndpointClient is the same as NewMultiEndpointClientWithConfig with
-// the default client configuration.
-//
-// A valid database name has the
-// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
-func NewMultiEndpointClient(ctx context.Context, database string, gmeCfg *grpcgcp.GCPMultiEndpointOptions, opts ...option.ClientOption) (*Client, *grpcgcp.GCPMultiEndpoint, error) {
- return NewMultiEndpointClientWithConfig(ctx, database, ClientConfig{SessionPoolConfig: DefaultSessionPoolConfig, DisableRouteToLeader: false}, gmeCfg, opts...)
-}
-
-// NewMultiEndpointClientWithConfig creates a client to a database using GCPMultiEndpoint.
-//
-// The purposes of GCPMultiEndpoint are:
-//
-// - Fallback to an alternative endpoint (host:port) when the original
-// endpoint is completely unavailable.
-// - Be able to route a Cloud Spanner call to a specific group of endpoints.
-// - Be able to reconfigure endpoints in runtime.
-//
-// The GRPCgcpConfig and DialFunc in the GCPMultiEndpointOptions are optional
-// and will be configured automatically.
-//
-// For GCPMultiEndpoint the number of channels is configured via MaxSize of the
-// ChannelPool config in the GRPCgcpConfig.
-//
-// The GCPMultiEndpoint returned can be used to update the endpoints in runtime.
-//
-// A valid database name has the
-// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID.
-func NewMultiEndpointClientWithConfig(ctx context.Context, database string, config ClientConfig, gmeCfg *grpcgcp.GCPMultiEndpointOptions, opts ...option.ClientOption) (c *Client, gme *grpcgcp.GCPMultiEndpoint, err error) {
- gme, err = createGCPMultiEndpoint(gmeCfg, config, opts...)
- if err != nil {
- return nil, nil, err
- }
- // Align number of channels.
- config.NumChannels = int(gme.GCPConfig().GetChannelPool().GetMaxSize())
- c, err = newClientWithConfig(ctx, database, config, gme, opts...)
- if err != nil {
- return nil, nil, err
- }
- return
-}
-
-// Combines the default options from the generated client, the default options
-// of the hand-written client and the user options to one list of options.
-// Precedence: userOpts > clientDefaultOpts > generatedDefaultOpts
-func allClientOpts(numChannels int, compression string, userOpts ...option.ClientOption) []option.ClientOption {
- generatedDefaultOpts := vkit.DefaultClientOptions()
- clientDefaultOpts := []option.ClientOption{
- option.WithGRPCConnectionPool(numChannels),
- option.WithUserAgent(fmt.Sprintf("spanner-go/v%s", internal.Version)),
- internaloption.AllowNonDefaultServiceAccount(true),
- }
- if enableDirectPathXds, _ := strconv.ParseBool(os.Getenv("GOOGLE_SPANNER_ENABLE_DIRECT_ACCESS")); enableDirectPathXds {
- clientDefaultOpts = append(clientDefaultOpts, internaloption.EnableDirectPath(true), internaloption.EnableDirectPathXds())
- }
- if compression == "gzip" {
- userOpts = append(userOpts, option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
- grpc.UseCompressor(gzip.Name))))
- }
- allDefaultOpts := append(generatedDefaultOpts, clientDefaultOpts...)
- return append(allDefaultOpts, userOpts...)
-}
-
-// getQueryOptions returns the query options overwritten by the environment
-// variables if exist. The input parameter is the query options set by users
-// via application-level configuration. If the environment variables are set,
-// this will return the overwritten query options.
-func getQueryOptions(opts QueryOptions) QueryOptions {
- if opts.Options == nil {
- opts.Options = &sppb.ExecuteSqlRequest_QueryOptions{}
- }
- opv := os.Getenv("SPANNER_OPTIMIZER_VERSION")
- if opv != "" {
- opts.Options.OptimizerVersion = opv
- }
- opsp := os.Getenv("SPANNER_OPTIMIZER_STATISTICS_PACKAGE")
- if opsp != "" {
- opts.Options.OptimizerStatisticsPackage = opsp
- }
- return opts
-}
-
-// Close closes the client.
-func (c *Client) Close() {
- if c.idleSessions != nil {
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- c.idleSessions.close(ctx)
- }
- c.sc.close()
-}
-
-// Single provides a read-only snapshot transaction optimized for the case
-// where only a single read or query is needed. This is more efficient than
-// using ReadOnlyTransaction() for a single read or query.
-//
-// Single will use a strong TimestampBound by default. Use
-// ReadOnlyTransaction.WithTimestampBound to specify a different
-// TimestampBound. A non-strong bound can be used to reduce latency, or
-// "time-travel" to prior versions of the database, see the documentation of
-// TimestampBound for details.
-func (c *Client) Single() *ReadOnlyTransaction {
- t := &ReadOnlyTransaction{singleUse: true}
- t.txReadOnly.sp = c.idleSessions
- t.txReadOnly.txReadEnv = t
- t.txReadOnly.qo = c.qo
- t.txReadOnly.ro = c.ro
- t.txReadOnly.disableRouteToLeader = true
- t.txReadOnly.replaceSessionFunc = func(ctx context.Context) error {
- if t.sh == nil {
- return spannerErrorf(codes.InvalidArgument, "missing session handle on transaction")
- }
- // Remove the session that returned 'Session not found' from the pool.
- t.sh.destroy()
- // Reset the transaction, acquire a new session and retry.
- t.state = txNew
- sh, _, err := t.acquire(ctx)
- if err != nil {
- return err
- }
- t.sh = sh
- return nil
- }
- t.txReadOnly.qo.DirectedReadOptions = c.dro
- t.txReadOnly.ro.DirectedReadOptions = c.dro
- t.txReadOnly.ro.LockHint = sppb.ReadRequest_LOCK_HINT_UNSPECIFIED
- t.ct = c.ct
- t.otConfig = c.otConfig
- return t
-}
-
-// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for
-// multiple reads from the database. You must call Close() when the
-// ReadOnlyTransaction is no longer needed to release resources on the server.
-//
-// ReadOnlyTransaction will use a strong TimestampBound by default. Use
-// ReadOnlyTransaction.WithTimestampBound to specify a different
-// TimestampBound. A non-strong bound can be used to reduce latency, or
-// "time-travel" to prior versions of the database, see the documentation of
-// TimestampBound for details.
-func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction {
- t := &ReadOnlyTransaction{
- singleUse: false,
- txReadyOrClosed: make(chan struct{}),
- }
- t.txReadOnly.sp = c.idleSessions
- t.txReadOnly.txReadEnv = t
- t.txReadOnly.qo = c.qo
- t.txReadOnly.ro = c.ro
- t.txReadOnly.disableRouteToLeader = true
- t.txReadOnly.qo.DirectedReadOptions = c.dro
- t.txReadOnly.ro.DirectedReadOptions = c.dro
- t.txReadOnly.ro.LockHint = sppb.ReadRequest_LOCK_HINT_UNSPECIFIED
- t.ct = c.ct
- t.otConfig = c.otConfig
- return t
-}
-
-// BatchReadOnlyTransaction returns a BatchReadOnlyTransaction that can be used
-// for partitioned reads or queries from a snapshot of the database. This is
-// useful in batch processing pipelines where one wants to divide the work of
-// reading from the database across multiple machines.
-//
-// Note: This transaction does not use the underlying session pool but creates a
-// new session each time, and the session is reused across clients.
-//
-// You should call Close() after the txn is no longer needed on local
-// client, and call Cleanup() when the txn is finished for all clients, to free
-// the session.
-func (c *Client) BatchReadOnlyTransaction(ctx context.Context, tb TimestampBound) (*BatchReadOnlyTransaction, error) {
- var (
- tx transactionID
- rts time.Time
- s *session
- sh *sessionHandle
- err error
- )
-
- // Create session.
- s, err = c.sc.createSession(ctx)
- if err != nil {
- return nil, err
- }
- sh = &sessionHandle{session: s}
- sh.updateLastUseTime()
-
- // Begin transaction.
- res, err := sh.getClient().BeginTransaction(contextWithOutgoingMetadata(ctx, sh.getMetadata(), true), &sppb.BeginTransactionRequest{
- Session: sh.getID(),
- Options: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_ReadOnly_{
- ReadOnly: buildTransactionOptionsReadOnly(tb, true),
- },
- },
- })
- if err != nil {
- return nil, ToSpannerError(err)
- }
- tx = res.Id
- if res.ReadTimestamp != nil {
- rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
- }
-
- t := &BatchReadOnlyTransaction{
- ReadOnlyTransaction: ReadOnlyTransaction{
- tx: tx,
- txReadyOrClosed: make(chan struct{}),
- state: txActive,
- rts: rts,
- isLongRunningTransaction: true,
- },
- ID: BatchReadOnlyTransactionID{
- tid: tx,
- sid: sh.getID(),
- rts: rts,
- },
- }
- t.txReadOnly.sh = sh
- t.txReadOnly.txReadEnv = t
- t.txReadOnly.qo = c.qo
- t.txReadOnly.ro = c.ro
- t.txReadOnly.disableRouteToLeader = true
- t.txReadOnly.qo.DirectedReadOptions = c.dro
- t.txReadOnly.ro.DirectedReadOptions = c.dro
- t.txReadOnly.ro.LockHint = sppb.ReadRequest_LOCK_HINT_UNSPECIFIED
- t.ct = c.ct
- t.otConfig = c.otConfig
- return t, nil
-}
-
-// BatchReadOnlyTransactionFromID reconstruct a BatchReadOnlyTransaction from
-// BatchReadOnlyTransactionID
-func (c *Client) BatchReadOnlyTransactionFromID(tid BatchReadOnlyTransactionID) *BatchReadOnlyTransaction {
- s, err := c.sc.sessionWithID(tid.sid)
- if err != nil {
- logf(c.logger, "unexpected error: %v\nThis is an indication of an internal error in the Spanner client library.", err)
- // Use an invalid session. Preferably, this method should just return
- // the error instead of this, but that would mean an API change.
- s = &session{}
- }
- sh := &sessionHandle{session: s}
-
- t := &BatchReadOnlyTransaction{
- ReadOnlyTransaction: ReadOnlyTransaction{
- tx: tid.tid,
- txReadyOrClosed: make(chan struct{}),
- state: txActive,
- rts: tid.rts,
- isLongRunningTransaction: true,
- },
- ID: tid,
- }
- t.txReadOnly.sh = sh
- t.txReadOnly.txReadEnv = t
- t.txReadOnly.qo = c.qo
- t.txReadOnly.ro = c.ro
- t.txReadOnly.disableRouteToLeader = true
- t.txReadOnly.qo.DirectedReadOptions = c.dro
- t.txReadOnly.ro.DirectedReadOptions = c.dro
- t.txReadOnly.ro.LockHint = sppb.ReadRequest_LOCK_HINT_UNSPECIFIED
- t.ct = c.ct
- t.otConfig = c.otConfig
- return t
-}
-
-type transactionInProgressKey struct{}
-
-func checkNestedTxn(ctx context.Context) error {
- if ctx.Value(transactionInProgressKey{}) != nil {
- return spannerErrorf(codes.FailedPrecondition, "Cloud Spanner does not support nested transactions")
- }
- return nil
-}
-
-// ReadWriteTransaction executes a read-write transaction, with retries as
-// necessary.
-//
-// The function f will be called one or more times. It must not maintain
-// any state between calls.
-//
-// If the transaction cannot be committed or if f returns an ABORTED error,
-// ReadWriteTransaction will call f again. It will continue to call f until the
-// transaction can be committed or the Context times out or is cancelled. If f
-// returns an error other than ABORTED, ReadWriteTransaction will abort the
-// transaction and return the error.
-//
-// To limit the number of retries, set a deadline on the Context rather than
-// using a fixed limit on the number of attempts. ReadWriteTransaction will
-// retry as needed until that deadline is met.
-//
-// See https://godoc.org/cloud.google.com/go/spanner#ReadWriteTransaction for
-// more details.
-func (c *Client) ReadWriteTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (commitTimestamp time.Time, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.ReadWriteTransaction")
- defer func() { trace.EndSpan(ctx, err) }()
- resp, err := c.rwTransaction(ctx, f, TransactionOptions{})
- return resp.CommitTs, err
-}
-
-// ReadWriteTransactionWithOptions executes a read-write transaction with
-// configurable options, with retries as necessary.
-//
-// ReadWriteTransactionWithOptions is a configurable ReadWriteTransaction.
-//
-// See https://godoc.org/cloud.google.com/go/spanner#ReadWriteTransaction for
-// more details.
-func (c *Client) ReadWriteTransactionWithOptions(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error, options TransactionOptions) (resp CommitResponse, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.ReadWriteTransactionWithOptions")
- defer func() { trace.EndSpan(ctx, err) }()
- resp, err = c.rwTransaction(ctx, f, options)
- return resp, err
-}
-
-func (c *Client) rwTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error, options TransactionOptions) (resp CommitResponse, err error) {
- if err := checkNestedTxn(ctx); err != nil {
- return resp, err
- }
- var (
- sh *sessionHandle
- t *ReadWriteTransaction
- attempt = 0
- )
- defer func() {
- if sh != nil {
- sh.recycle()
- }
- }()
- err = runWithRetryOnAbortedOrFailedInlineBeginOrSessionNotFound(ctx, func(ctx context.Context) error {
- var (
- err error
- )
- if sh == nil || sh.getID() == "" || sh.getClient() == nil {
- // Session handle hasn't been allocated or has been destroyed.
- sh, err = c.idleSessions.take(ctx)
- if err != nil {
- // If session retrieval fails, just fail the transaction.
- return err
- }
-
- // Some operations (for ex BatchUpdate) can be long-running. For such operations set the isLongRunningTransaction flag to be true
- t.setSessionEligibilityForLongRunning(sh)
- }
- if t.shouldExplicitBegin(attempt) {
- // Make sure we set the current session handle before calling BeginTransaction.
- // Note that the t.begin(ctx) call could change the session that is being used by the transaction, as the
- // BeginTransaction RPC invocation will be retried on a new session if it returns SessionNotFound.
- t.txReadOnly.sh = sh
- if err = t.begin(ctx); err != nil {
- trace.TracePrintf(ctx, nil, "Error while BeginTransaction during retrying a ReadWrite transaction: %v", ToSpannerError(err))
- return ToSpannerError(err)
- }
- } else {
- t = &ReadWriteTransaction{
- txReadyOrClosed: make(chan struct{}),
- }
- t.txReadOnly.sh = sh
- }
- attempt++
- t.txReadOnly.sp = c.idleSessions
- t.txReadOnly.txReadEnv = t
- t.txReadOnly.qo = c.qo
- t.txReadOnly.ro = c.ro
- t.txReadOnly.disableRouteToLeader = c.disableRouteToLeader
- t.wb = []*Mutation{}
- t.txOpts = c.txo.merge(options)
- t.ct = c.ct
- t.otConfig = c.otConfig
-
- trace.TracePrintf(ctx, map[string]interface{}{"transactionSelector": t.getTransactionSelector().String()},
- "Starting transaction attempt")
-
- resp, err = t.runInTransaction(ctx, f)
- return err
- })
- return resp, err
-}
-
-// applyOption controls the behavior of Client.Apply.
-type applyOption struct {
- // If atLeastOnce == true, Client.Apply will execute the mutations on Cloud
- // Spanner at least once.
- atLeastOnce bool
- // transactionTag will be included with the CommitRequest.
- transactionTag string
- // priority is the RPC priority that is used for the commit operation.
- priority sppb.RequestOptions_Priority
- // If excludeTxnFromChangeStreams == true, mutations from this Client.Apply
- // will not be recorded in allowed tracking change streams with DDL option
- // allow_txn_exclusion=true.
- excludeTxnFromChangeStreams bool
- // commitOptions is the commit options to use for the commit operation.
- commitOptions CommitOptions
-}
-
-// An ApplyOption is an optional argument to Apply.
-type ApplyOption func(*applyOption)
-
-// ApplyAtLeastOnce returns an ApplyOption that removes replay protection.
-//
-// With this option, Apply may attempt to apply mutations more than once; if
-// the mutations are not idempotent, this may lead to a failure being reported
-// when the mutation was applied more than once. For example, an insert may
-// fail with ALREADY_EXISTS even though the row did not exist before Apply was
-// called. For this reason, most users of the library will prefer not to use
-// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas
-// Apply's default replay protection may require an additional RPC. So this
-// option may be appropriate for latency sensitive and/or high throughput blind
-// writing.
-func ApplyAtLeastOnce() ApplyOption {
- return func(ao *applyOption) {
- ao.atLeastOnce = true
- }
-}
-
-// TransactionTag returns an ApplyOption that will include the given tag as a
-// transaction tag for a write-only transaction.
-func TransactionTag(tag string) ApplyOption {
- return func(ao *applyOption) {
- ao.transactionTag = tag
- }
-}
-
-// Priority returns an ApplyOptions that sets the RPC priority to use for the
-// commit operation.
-func Priority(priority sppb.RequestOptions_Priority) ApplyOption {
- return func(ao *applyOption) {
- ao.priority = priority
- }
-}
-
-// ExcludeTxnFromChangeStreams returns an ApplyOptions that sets whether to exclude recording this commit operation from allowed tracking change streams.
-func ExcludeTxnFromChangeStreams() ApplyOption {
- return func(ao *applyOption) {
- ao.excludeTxnFromChangeStreams = true
- }
-}
-
-// ApplyCommitOptions returns an ApplyOption that sets the commit options to use for the commit operation.
-func ApplyCommitOptions(co CommitOptions) ApplyOption {
- return func(ao *applyOption) {
- ao.commitOptions = co
- }
-}
-
-// Apply applies a list of mutations atomically to the database.
-func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (commitTimestamp time.Time, err error) {
- ao := &applyOption{}
-
- for _, opt := range c.ao {
- opt(ao)
- }
-
- for _, opt := range opts {
- opt(ao)
- }
-
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Apply")
- defer func() { trace.EndSpan(ctx, err) }()
-
- if !ao.atLeastOnce {
- resp, err := c.ReadWriteTransactionWithOptions(ctx, func(ctx context.Context, t *ReadWriteTransaction) error {
- return t.BufferWrite(ms)
- }, TransactionOptions{CommitPriority: ao.priority, TransactionTag: ao.transactionTag, ExcludeTxnFromChangeStreams: ao.excludeTxnFromChangeStreams, CommitOptions: ao.commitOptions})
- return resp.CommitTs, err
- }
- t := &writeOnlyTransaction{sp: c.idleSessions, commitPriority: ao.priority, transactionTag: ao.transactionTag, disableRouteToLeader: c.disableRouteToLeader, excludeTxnFromChangeStreams: ao.excludeTxnFromChangeStreams, commitOptions: ao.commitOptions}
- return t.applyAtLeastOnce(ctx, ms...)
-}
-
-// BatchWriteOptions provides options for a BatchWriteRequest.
-type BatchWriteOptions struct {
- // Priority is the RPC priority to use for this request.
- Priority sppb.RequestOptions_Priority
-
- // The transaction tag to use for this request.
- TransactionTag string
-
- // If excludeTxnFromChangeStreams == true, modifications from all transactions
- // in this batch write request will not be recorded in allowed tracking
- // change treams with DDL option allow_txn_exclusion=true.
- ExcludeTxnFromChangeStreams bool
-}
-
-// merge combines two BatchWriteOptions such that the input parameter will have higher
-// order of precedence.
-func (bwo BatchWriteOptions) merge(opts BatchWriteOptions) BatchWriteOptions {
- merged := BatchWriteOptions{
- TransactionTag: bwo.TransactionTag,
- Priority: bwo.Priority,
- ExcludeTxnFromChangeStreams: bwo.ExcludeTxnFromChangeStreams || opts.ExcludeTxnFromChangeStreams,
- }
- if opts.TransactionTag != "" {
- merged.TransactionTag = opts.TransactionTag
- }
- if opts.Priority != sppb.RequestOptions_PRIORITY_UNSPECIFIED {
- merged.Priority = opts.Priority
- }
- return merged
-}
-
-// BatchWriteResponseIterator is an iterator over BatchWriteResponse structures returned from BatchWrite RPC.
-type BatchWriteResponseIterator struct {
- ctx context.Context
- stream sppb.Spanner_BatchWriteClient
- err error
- dataReceived bool
- replaceSession func(ctx context.Context) error
- rpc func(ctx context.Context) (sppb.Spanner_BatchWriteClient, error)
- release func(error)
- cancel func()
-}
-
-// Next returns the next result. Its second return value is iterator.Done if
-// there are no more results. Once Next returns Done, all subsequent calls
-// will return Done.
-func (r *BatchWriteResponseIterator) Next() (*sppb.BatchWriteResponse, error) {
- for {
- // Stream finished or in error state.
- if r.err != nil {
- return nil, r.err
- }
-
- // RPC not made yet.
- if r.stream == nil {
- r.stream, r.err = r.rpc(r.ctx)
- continue
- }
-
- // Read from the stream.
- var response *sppb.BatchWriteResponse
- response, r.err = r.stream.Recv()
-
- // Return an item.
- if r.err == nil {
- r.dataReceived = true
- return response, nil
- }
-
- // Stream finished.
- if r.err == io.EOF {
- r.err = iterator.Done
- return nil, r.err
- }
-
- // Retry request on session not found error only if no data has been received before.
- if !r.dataReceived && r.replaceSession != nil && isSessionNotFoundError(r.err) {
- r.err = r.replaceSession(r.ctx)
- r.stream = nil
- }
- }
-}
-
-// Stop terminates the iteration. It should be called after you finish using the
-// iterator.
-func (r *BatchWriteResponseIterator) Stop() {
- if r.stream != nil {
- err := r.err
- if err == iterator.Done {
- err = nil
- }
- defer trace.EndSpan(r.ctx, err)
- }
- if r.cancel != nil {
- r.cancel()
- r.cancel = nil
- }
- if r.release != nil {
- r.release(r.err)
- r.release = nil
- }
- if r.err == nil {
- r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop")
- }
-}
-
-// Do calls the provided function once in sequence for each item in the
-// iteration. If the function returns a non-nil error, Do immediately returns
-// that error.
-//
-// If there are no items in the iterator, Do will return nil without calling the
-// provided function.
-//
-// Do always calls Stop on the iterator.
-func (r *BatchWriteResponseIterator) Do(f func(r *sppb.BatchWriteResponse) error) error {
- defer r.Stop()
- for {
- row, err := r.Next()
- switch err {
- case iterator.Done:
- return nil
- case nil:
- if err = f(row); err != nil {
- return err
- }
- default:
- return err
- }
- }
-}
-
-// BatchWrite applies a list of mutation groups in a collection of efficient
-// transactions. The mutation groups are applied non-atomically in an
-// unspecified order and thus, they must be independent of each other. Partial
-// failure is possible, i.e., some mutation groups may have been applied
-// successfully, while some may have failed. The results of individual batches
-// are streamed into the response as the batches are applied.
-//
-// BatchWrite requests are not replay protected, meaning that each mutation
-// group may be applied more than once. Replays of non-idempotent mutations
-// may have undesirable effects. For example, replays of an insert mutation
-// may produce an already exists error or if you use generated or commit
-// timestamp-based keys, it may result in additional rows being added to the
-// mutation's table. We recommend structuring your mutation groups to be
-// idempotent to avoid this issue.
-func (c *Client) BatchWrite(ctx context.Context, mgs []*MutationGroup) *BatchWriteResponseIterator {
- return c.BatchWriteWithOptions(ctx, mgs, BatchWriteOptions{})
-}
-
-// BatchWriteWithOptions is same as BatchWrite. It accepts additional options to customize the request.
-func (c *Client) BatchWriteWithOptions(ctx context.Context, mgs []*MutationGroup, opts BatchWriteOptions) *BatchWriteResponseIterator {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.BatchWrite")
-
- var err error
- defer func() {
- trace.EndSpan(ctx, err)
- }()
-
- opts = c.bwo.merge(opts)
-
- mgsPb, err := mutationGroupsProto(mgs)
- if err != nil {
- return &BatchWriteResponseIterator{err: err}
- }
-
- var sh *sessionHandle
- sh, err = c.idleSessions.take(ctx)
- if err != nil {
- return &BatchWriteResponseIterator{err: err}
- }
-
- rpc := func(ct context.Context) (sppb.Spanner_BatchWriteClient, error) {
- var md metadata.MD
- sh.updateLastUseTime()
- stream, rpcErr := sh.getClient().BatchWrite(contextWithOutgoingMetadata(ct, sh.getMetadata(), c.disableRouteToLeader), &sppb.BatchWriteRequest{
- Session: sh.getID(),
- MutationGroups: mgsPb,
- RequestOptions: createRequestOptions(opts.Priority, "", opts.TransactionTag),
- ExcludeTxnFromChangeStreams: opts.ExcludeTxnFromChangeStreams,
- }, gax.WithGRPCOptions(grpc.Header(&md)))
-
- if getGFELatencyMetricsFlag() && md != nil && c.ct != nil {
- if metricErr := createContextAndCaptureGFELatencyMetrics(ct, c.ct, md, "BatchWrite"); metricErr != nil {
- trace.TracePrintf(ct, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ct, md, "BatchWrite", c.otConfig); metricErr != nil {
- trace.TracePrintf(ct, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", err)
- }
- return stream, rpcErr
- }
-
- replaceSession := func(ct context.Context) error {
- if sh != nil {
- sh.destroy()
- }
- var sessionErr error
- sh, sessionErr = c.idleSessions.take(ct)
- return sessionErr
- }
-
- release := func(err error) {
- if sh == nil {
- return
- }
- if isSessionNotFoundError(err) {
- sh.destroy()
- }
- sh.recycle()
- }
-
- ctx, cancel := context.WithCancel(ctx)
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.BatchWriteResponseIterator")
- return &BatchWriteResponseIterator{
- ctx: ctx,
- rpc: rpc,
- replaceSession: replaceSession,
- release: release,
- cancel: cancel,
- }
-}
-
-// logf logs the given message to the given logger, or the standard logger if
-// the given logger is nil.
-func logf(logger *log.Logger, format string, v ...interface{}) {
- if logger == nil {
- log.Printf(format, v...)
- } else {
- logger.Printf(format, v...)
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go
deleted file mode 100644
index 8c30fdcc2..000000000
--- a/vendor/cloud.google.com/go/spanner/doc.go
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package spanner provides a client for reading and writing to Cloud Spanner
-databases. See the packages under admin for clients that operate on databases
-and instances.
-
-See https://cloud.google.com/spanner/docs/getting-started/go/ for an
-introduction to Cloud Spanner and additional help on using this API.
-
-See https://godoc.org/cloud.google.com/go for authentication, timeouts,
-connection pooling and similar aspects of this package.
-
-# Creating a Client
-
-To start working with this package, create a client that refers to the database
-of interest:
-
- ctx := context.Background()
- client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
- if err != nil {
- // TODO: Handle error.
- }
- defer client.Close()
-
-Remember to close the client after use to free up the sessions in the session
-pool.
-
-To use an emulator with this library, you can set the SPANNER_EMULATOR_HOST
-environment variable to the address at which your emulator is running. This will
-send requests to that address instead of to Cloud Spanner. You can then create
-and use a client as usual:
-
- // Set SPANNER_EMULATOR_HOST environment variable.
- err := os.Setenv("SPANNER_EMULATOR_HOST", "localhost:9010")
- if err != nil {
- // TODO: Handle error.
- }
- // Create client as usual.
- client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
- if err != nil {
- // TODO: Handle error.
- }
-
-# Simple Reads and Writes
-
-Two Client methods, Apply and Single, work well for simple reads and writes. As
-a quick introduction, here we write a new row to the database and read it back:
-
- _, err := client.Apply(ctx, []*spanner.Mutation{
- spanner.Insert("Users",
- []string{"name", "email"},
- []interface{}{"alice", "a@example.com"})})
- if err != nil {
- // TODO: Handle error.
- }
- row, err := client.Single().ReadRow(ctx, "Users",
- spanner.Key{"alice"}, []string{"email"})
- if err != nil {
- // TODO: Handle error.
- }
-
-All the methods used above are discussed in more detail below.
-
-# Keys
-
-Every Cloud Spanner row has a unique key, composed of one or more columns.
-Construct keys with a literal of type Key:
-
- key1 := spanner.Key{"alice"}
-
-# KeyRanges
-
-The keys of a Cloud Spanner table are ordered. You can specify ranges of keys
-using the KeyRange type:
-
- kr1 := spanner.KeyRange{Start: key1, End: key2}
-
-By default, a KeyRange includes its start key but not its end key. Use
-the Kind field to specify other boundary conditions:
-
- // include both keys
- kr2 := spanner.KeyRange{Start: key1, End: key2, Kind: spanner.ClosedClosed}
-
-# KeySets
-
-A KeySet represents a set of keys. A single Key or KeyRange can act as a KeySet.
-Use the KeySets function to build the union of several KeySets:
-
- ks1 := spanner.KeySets(key1, key2, kr1, kr2)
-
-AllKeys returns a KeySet that refers to all the keys in a table:
-
- ks2 := spanner.AllKeys()
-
-# Transactions
-
-All Cloud Spanner reads and writes occur inside transactions. There are two
-types of transactions, read-only and read-write. Read-only transactions cannot
-change the database, do not acquire locks, and may access either the current
-database state or states in the past. Read-write transactions can read the
-database before writing to it, and always apply to the most recent database
-state.
-
-# Single Reads
-
-The simplest and fastest transaction is a ReadOnlyTransaction that supports a
-single read operation. Use Client.Single to create such a transaction. You can
-chain the call to Single with a call to a Read method.
-
-When you only want one row whose key you know, use ReadRow. Provide the table
-name, key, and the columns you want to read:
-
- row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"})
-
-Read multiple rows with the Read method. It takes a table name, KeySet, and list
-of columns:
-
- iter := client.Single().Read(ctx, "Accounts", keyset1, columns)
-
-Read returns a RowIterator. You can call the Do method on the iterator and pass
-a callback:
-
- err := iter.Do(func(row *Row) error {
- // TODO: use row
- return nil
- })
-
-RowIterator also follows the standard pattern for the Google
-Cloud Client Libraries:
-
- defer iter.Stop()
- for {
- row, err := iter.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- // TODO: Handle error.
- }
- // TODO: use row
- }
-
-Always call Stop when you finish using an iterator this way, whether or not you
-iterate to the end. (Failing to call Stop could lead you to exhaust the
-database's session quota.)
-
-To read rows with an index, use ReadUsingIndex.
-
-# Statements
-
-The most general form of reading uses SQL statements. Construct a Statement
-with NewStatement, setting any parameters using the Statement's Params map:
-
- stmt := spanner.NewStatement("SELECT First, Last FROM SINGERS WHERE Last >= @start")
- stmt.Params["start"] = "Dylan"
-
-You can also construct a Statement directly with a struct literal, providing
-your own map of parameters.
-
-Use the Query method to run the statement and obtain an iterator:
-
- iter := client.Single().Query(ctx, stmt)
-
-# Rows
-
-Once you have a Row, via an iterator or a call to ReadRow, you can extract
-column values in several ways. Pass in a pointer to a Go variable of the
-appropriate type when you extract a value.
-
-You can extract by column position or name:
-
- err := row.Column(0, &name)
- err = row.ColumnByName("balance", &balance)
-
-You can extract all the columns at once:
-
- err = row.Columns(&name, &balance)
-
-Or you can define a Go struct that corresponds to your columns, and extract
-into that:
-
- var s struct { Name string; Balance int64 }
- err = row.ToStruct(&s)
-
-For Cloud Spanner columns that may contain NULL, use one of the NullXXX types,
-like NullString:
-
- var ns spanner.NullString
- if err := row.Column(0, &ns); err != nil {
- // TODO: Handle error.
- }
- if ns.Valid {
- fmt.Println(ns.StringVal)
- } else {
- fmt.Println("column is NULL")
- }
-
-# Multiple Reads
-
-To perform more than one read in a transaction, use ReadOnlyTransaction:
-
- txn := client.ReadOnlyTransaction()
- defer txn.Close()
- iter := txn.Query(ctx, stmt1)
- // ...
- iter = txn.Query(ctx, stmt2)
- // ...
-
-You must call Close when you are done with the transaction.
-
-# Timestamps and Timestamp Bounds
-
-Cloud Spanner read-only transactions conceptually perform all their reads at a
-single moment in time, called the transaction's read timestamp. Once a read has
-started, you can call ReadOnlyTransaction's Timestamp method to obtain the read
-timestamp.
-
-By default, a transaction will pick the most recent time (a time where all
-previously committed transactions are visible) for its reads. This provides the
-freshest data, but may involve some delay. You can often get a quicker response
-if you are willing to tolerate "stale" data. You can control the read timestamp
-selected by a transaction by calling the WithTimestampBound method on the
-transaction before using it. For example, to perform a query on data that is at
-most one minute stale, use
-
- client.Single().
- WithTimestampBound(spanner.MaxStaleness(1*time.Minute)).
- Query(ctx, stmt)
-
-See the documentation of TimestampBound for more details.
-
-# Mutations
-
-To write values to a Cloud Spanner database, construct a Mutation. The spanner
-package has functions for inserting, updating and deleting rows. Except for the
-Delete methods, which take a Key or KeyRange, each mutation-building function
-comes in three varieties.
-
-One takes lists of columns and values along with the table name:
-
- m1 := spanner.Insert("Users",
- []string{"name", "email"},
- []interface{}{"alice", "a@example.com"})
-
-One takes a map from column names to values:
-
- m2 := spanner.InsertMap("Users", map[string]interface{}{
- "name": "alice",
- "email": "a@example.com",
- })
-
-And the third accepts a struct value, and determines the columns from the
-struct field names:
-
- type User struct { Name, Email string }
- u := User{Name: "alice", Email: "a@example.com"}
- m3, err := spanner.InsertStruct("Users", u)
-
-# Writes
-
-To apply a list of mutations to the database, use Apply:
-
- _, err := client.Apply(ctx, []*spanner.Mutation{m1, m2, m3})
-
-If you need to read before writing in a single transaction, use a
-ReadWriteTransaction. ReadWriteTransactions may be aborted automatically by the
-backend and need to be retried. You pass in a function to ReadWriteTransaction,
-and the client will handle the retries automatically. Use the transaction's
-BufferWrite method to buffer mutations, which will all be executed at the end
-of the transaction:
-
- _, err := client.ReadWriteTransaction(ctx, func(ctx context.Context, txn *spanner.ReadWriteTransaction) error {
- var balance int64
- row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"})
- if err != nil {
- // The transaction function will be called again if the error code
- // of this error is Aborted. The backend may automatically abort
- // any read/write transaction if it detects a deadlock or other
- // problems.
- return err
- }
- if err := row.Column(0, &balance); err != nil {
- return err
- }
-
- if balance <= 10 {
- return errors.New("insufficient funds in account")
- }
- balance -= 10
- m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance})
- // The buffered mutation will be committed. If the commit
- // fails with an Aborted error, this function will be called
- // again.
- return txn.BufferWrite([]*spanner.Mutation{m})
- })
-
-# Structs
-
-Cloud Spanner STRUCT (aka STRUCT) values
-(https://cloud.google.com/spanner/docs/data-types#struct-type) can be
-represented by a Go struct value.
-
-A proto StructType is built from the field types and field tag information of
-the Go struct. If a field in the struct type definition has a
-"spanner:<field_name>" tag, then the value of the "spanner" key in the tag is
-used as the name for that field in the built StructType, otherwise the field
-name in the struct definition is used. To specify a field with an empty field
-name in a Cloud Spanner STRUCT type, use the `spanner:""` tag annotation against
-the corresponding field in the Go struct's type definition.
-
-A STRUCT value can contain STRUCT-typed and Array-of-STRUCT typed fields and
-these can be specified using named struct-typed and []struct-typed fields inside
-a Go struct. However, embedded struct fields are not allowed. Unexported struct
-fields are ignored.
-
-NULL STRUCT values in Cloud Spanner are typed. A nil pointer to a Go struct
-value can be used to specify a NULL STRUCT value of the corresponding
-StructType. Nil and empty slices of a Go STRUCT type can be used to specify
-NULL and empty array values respectively of the corresponding StructType. A
-slice of pointers to a Go struct type can be used to specify an array of
-NULL-able STRUCT values.
-
-# DML and Partitioned DML
-
-Spanner supports DML statements like INSERT, UPDATE and DELETE. Use
-ReadWriteTransaction.Update to run DML statements. It returns the number of rows
-affected. (You can call use ReadWriteTransaction.Query with a DML statement. The
-first call to Next on the resulting RowIterator will return iterator.Done, and
-the RowCount field of the iterator will hold the number of affected rows.)
-
-For large databases, it may be more efficient to partition the DML statement.
-Use client.PartitionedUpdate to run a DML statement in this way. Not all DML
-statements can be partitioned.
-
-# Tracing
-
-This client has been instrumented to use OpenCensus tracing
-(http://opencensus.io). To enable tracing, see "Enabling Tracing for a Program"
-at https://godoc.org/go.opencensus.io/trace. OpenCensus tracing requires Go 1.8
-or higher.
-*/
-package spanner // import "cloud.google.com/go/spanner"
diff --git a/vendor/cloud.google.com/go/spanner/emulator_test.sh b/vendor/cloud.google.com/go/spanner/emulator_test.sh
deleted file mode 100644
index 8fe69dba7..000000000
--- a/vendor/cloud.google.com/go/spanner/emulator_test.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License..
-
-# Fail on any error
-set -eo pipefail
-
-# Display commands being run
-set -x
-
-export SPANNER_EMULATOR_HOST=localhost:9010
-export GCLOUD_TESTS_GOLANG_PROJECT_ID=emulator-test-project
-echo "Running the Cloud Spanner emulator: $SPANNER_EMULATOR_HOST";
-
-# Download the emulator
-# TODO: Find a way to use 'latest' here.
-EMULATOR_VERSION=1.4.0
-wget https://storage.googleapis.com/cloud-spanner-emulator/releases/${EMULATOR_VERSION}/cloud-spanner-emulator_linux_amd64-${EMULATOR_VERSION}.tar.gz
-tar zxvf cloud-spanner-emulator_linux_amd64-${EMULATOR_VERSION}.tar.gz
-chmod u+x emulator_main
-
-# Start the emulator
-./emulator_main --host_port $SPANNER_EMULATOR_HOST &
-
-EMULATOR_PID=$!
-
-# Stop the emulator & clean the environment variable
-function cleanup() {
- kill -2 $EMULATOR_PID
- unset SPANNER_EMULATOR_HOST
- unset GCLOUD_TESTS_GOLANG_PROJECT_ID
- echo "Cleanup the emulator";
-}
-trap cleanup EXIT
-
-echo "Testing without GCPMultiEnpoint..." | tee -a sponge_log.log
-go test -count=1 -v -timeout 10m ./... -run '^TestIntegration_' 2>&1 | tee -a sponge_log.log
-
-echo "Testing with GCPMultiEnpoint..." | tee -a sponge_log.log
-GCLOUD_TESTS_GOLANG_USE_GRPC_GCP=true go test -count=1 -v -timeout 10m ./... -run '^TestIntegration_' 2>&1 | tee -a sponge_log.log
diff --git a/vendor/cloud.google.com/go/spanner/errors.go b/vendor/cloud.google.com/go/spanner/errors.go
deleted file mode 100644
index ddf506bd5..000000000
--- a/vendor/cloud.google.com/go/spanner/errors.go
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/googleapis/gax-go/v2/apierror"
- "google.golang.org/genproto/googleapis/rpc/errdetails"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var (
- // ErrRowNotFound row not found error
- ErrRowNotFound = errors.New("row not found")
-)
-
-// Error is the structured error returned by Cloud Spanner client.
-//
-// Deprecated: Unwrap any error that is returned by the Spanner client as an APIError
-// to access the error details. Do not try to convert the error to the
-// spanner.Error struct, as that struct may be removed in a future release.
-//
-// Example:
-// var apiErr *apierror.APIError
-// _, err := spanner.NewClient(context.Background())
-// errors.As(err, &apiErr)
-type Error struct {
- // Code is the canonical error code for describing the nature of a
- // particular error.
- //
- // Deprecated: The error code should be extracted from the wrapped error by
- // calling ErrCode(err error). This field will be removed in a future
- // release.
- Code codes.Code
- // err is the wrapped error that caused this Spanner error. The wrapped
- // error can be read with the Unwrap method.
- err error
- // Desc explains more details of the error.
- Desc string
- // additionalInformation optionally contains any additional information
- // about the error.
- additionalInformation string
-}
-
-// TransactionOutcomeUnknownError is wrapped in a Spanner error when the error
-// occurred during a transaction, and the outcome of the transaction is
-// unknown as a result of the error. This could be the case if a timeout or
-// canceled error occurs after a Commit request has been sent, but before the
-// client has received a response from the server.
-type TransactionOutcomeUnknownError struct {
- // err is the wrapped error that caused this TransactionOutcomeUnknownError
- // error. The wrapped error can be read with the Unwrap method.
- err error
-}
-
-const transactionOutcomeUnknownMsg = "transaction outcome unknown"
-
-// Error implements error.Error.
-func (*TransactionOutcomeUnknownError) Error() string { return transactionOutcomeUnknownMsg }
-
-// Unwrap returns the wrapped error (if any).
-func (e *TransactionOutcomeUnknownError) Unwrap() error { return e.err }
-
-// Error implements error.Error.
-func (e *Error) Error() string {
- if e == nil {
- return fmt.Sprintf("spanner: OK")
- }
- code := ErrCode(e)
- if e.additionalInformation == "" {
- return fmt.Sprintf("spanner: code = %q, desc = %q", code, e.Desc)
- }
- return fmt.Sprintf("spanner: code = %q, desc = %q, additional information = %s", code, e.Desc, e.additionalInformation)
-}
-
-// Unwrap returns the wrapped error (if any).
-func (e *Error) Unwrap() error {
- return e.err
-}
-
-// GRPCStatus returns the corresponding gRPC Status of this Spanner error.
-// This allows the error to be converted to a gRPC status using
-// `status.Convert(error)`.
-func (e *Error) GRPCStatus() *status.Status {
- err := unwrap(e)
- for {
- // If the base error is nil, return status created from e.Code and e.Desc.
- if err == nil {
- return status.New(e.Code, e.Desc)
- }
- code := status.Code(err)
- if code != codes.Unknown {
- return status.New(code, e.Desc)
- }
- err = unwrap(err)
- }
-}
-
-// decorate decorates an existing spanner.Error with more information.
-func (e *Error) decorate(info string) {
- e.Desc = fmt.Sprintf("%v, %v", info, e.Desc)
-}
-
-// spannerErrorf generates a *spanner.Error with the given description and an
-// APIError error having given error code as its status.
-func spannerErrorf(code codes.Code, format string, args ...interface{}) error {
- msg := fmt.Sprintf(format, args...)
- wrapped, _ := apierror.FromError(status.Error(code, msg))
- return &Error{
- Code: code,
- err: wrapped,
- Desc: msg,
- }
-}
-
-// ToSpannerError converts a general Go error to *spanner.Error. If the given
-// error is already a *spanner.Error, the original error will be returned.
-//
-// Spanner Errors are normally created by the Spanner client library from the
-// returned APIError of a RPC. This method can also be used to create Spanner
-// errors for use in tests. The recommended way to create test errors is
-// calling this method with a status error, e.g.
-// ToSpannerError(status.New(codes.NotFound, "Table not found").Err())
-func ToSpannerError(err error) error {
- return toSpannerErrorWithCommitInfo(err, false)
-}
-
-// toAPIError converts a general Go error to *gax-go.APIError. If the given
-// error is not convertible to *gax-go.APIError, the original error will be returned.
-func toAPIError(err error) error {
- if apiError, ok := apierror.FromError(err); ok {
- return apiError
- }
- return err
-}
-
-// toSpannerErrorWithCommitInfo converts general Go error to *spanner.Error
-// with additional information if the error occurred during a Commit request.
-//
-// If err is already a *spanner.Error, err is returned unmodified.
-func toSpannerErrorWithCommitInfo(err error, errorDuringCommit bool) error {
- if err == nil {
- return nil
- }
- var se *Error
- if errorAs(err, &se) {
- return se
- }
- switch {
- case err == context.DeadlineExceeded || err == context.Canceled:
- desc := err.Error()
- wrapped := status.FromContextError(err).Err()
- if errorDuringCommit {
- desc = fmt.Sprintf("%s, %s", desc, transactionOutcomeUnknownMsg)
- wrapped = &TransactionOutcomeUnknownError{err: wrapped}
- }
- return &Error{status.FromContextError(err).Code(), toAPIError(wrapped), desc, ""}
- case status.Code(err) == codes.Unknown:
- return &Error{codes.Unknown, toAPIError(err), err.Error(), ""}
- default:
- statusErr := status.Convert(err)
- code, desc := statusErr.Code(), statusErr.Message()
- wrapped := err
- if errorDuringCommit && (code == codes.DeadlineExceeded || code == codes.Canceled) {
- desc = fmt.Sprintf("%s, %s", desc, transactionOutcomeUnknownMsg)
- wrapped = &TransactionOutcomeUnknownError{err: wrapped}
- }
- return &Error{code, toAPIError(wrapped), desc, ""}
- }
-}
-
-// ErrCode extracts the canonical error code from a Go error.
-func ErrCode(err error) codes.Code {
- s, ok := status.FromError(err)
- if !ok {
- return codes.Unknown
- }
- return s.Code()
-}
-
-// ErrDesc extracts the Cloud Spanner error description from a Go error.
-func ErrDesc(err error) string {
- var se *Error
- if !errorAs(err, &se) {
- return err.Error()
- }
- return se.Desc
-}
-
-// extractResourceType extracts the resource type from any ResourceInfo detail
-// included in the error.
-func extractResourceType(err error) (string, bool) {
- var s *status.Status
- var se *Error
- if errorAs(err, &se) {
- // Unwrap statusError.
- s = status.Convert(se.Unwrap())
- } else {
- s = status.Convert(err)
- }
- if s == nil {
- return "", false
- }
- for _, detail := range s.Details() {
- if resourceInfo, ok := detail.(*errdetails.ResourceInfo); ok {
- return resourceInfo.ResourceType, true
- }
- }
- return "", false
-}
diff --git a/vendor/cloud.google.com/go/spanner/errors112.go b/vendor/cloud.google.com/go/spanner/errors112.go
deleted file mode 100644
index a64546968..000000000
--- a/vendor/cloud.google.com/go/spanner/errors112.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// TODO: Remove entire file when support for Go1.12 and lower has been dropped.
-//go:build !go1.13
-// +build !go1.13
-
-package spanner
-
-import "golang.org/x/xerrors"
-
-// unwrap is a generic implementation of (errors|xerrors).Unwrap(error). This
-// implementation uses xerrors and is included in Go 1.12 and earlier builds.
-func unwrap(err error) error {
- return xerrors.Unwrap(err)
-}
-
-// errorAs is a generic implementation of
-// (errors|xerrors).As(error, interface{}). This implementation uses xerrors
-// and is included in Go 1.12 and earlier builds.
-func errorAs(err error, target interface{}) bool {
- return xerrors.As(err, target)
-}
diff --git a/vendor/cloud.google.com/go/spanner/errors113.go b/vendor/cloud.google.com/go/spanner/errors113.go
deleted file mode 100644
index 6ae0b89e4..000000000
--- a/vendor/cloud.google.com/go/spanner/errors113.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// TODO: Remove entire file when support for Go1.12 and lower has been dropped.
-//go:build go1.13
-// +build go1.13
-
-package spanner
-
-import "errors"
-
-// unwrap is a generic implementation of (errors|xerrors).Unwrap(error). This
-// implementation uses errors and is included in Go 1.13 and later builds.
-func unwrap(err error) error {
- return errors.Unwrap(err)
-}
-
-// errorAs is a generic implementation of
-// (errors|xerrors).As(error, interface{}). This implementation uses errors and
-// is included in Go 1.13 and later builds.
-func errorAs(err error, target interface{}) bool {
- return errors.As(err, target)
-}
diff --git a/vendor/cloud.google.com/go/spanner/internal/version.go b/vendor/cloud.google.com/go/spanner/internal/version.go
deleted file mode 100644
index 1a70149b3..000000000
--- a/vendor/cloud.google.com/go/spanner/internal/version.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-// Version is the current tagged release of the library.
-const Version = "1.67.0"
diff --git a/vendor/cloud.google.com/go/spanner/key.go b/vendor/cloud.google.com/go/spanner/key.go
deleted file mode 100644
index 26b1d9ba9..000000000
--- a/vendor/cloud.google.com/go/spanner/key.go
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "bytes"
- "fmt"
- "math/big"
- "time"
-
- "cloud.google.com/go/civil"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/reflect/protoreflect"
- proto3 "google.golang.org/protobuf/types/known/structpb"
-)
-
-// A Key can be either a Cloud Spanner row's primary key or a secondary index
-// key. It is essentially an interface{} array, which represents a set of Cloud
-// Spanner columns. A Key can be used as:
-//
-// - A primary key which uniquely identifies a Cloud Spanner row.
-// - A secondary index key which maps to a set of Cloud Spanner rows indexed under it.
-// - An endpoint of primary key/secondary index ranges; see the KeyRange type.
-//
-// Rows that are identified by the Key type are outputs of read operation or
-// targets of delete operation in a mutation. Note that for
-// Insert/Update/InsertOrUpdate/Update mutation types, although they don't
-// require a primary key explicitly, the column list provided must contain
-// enough columns that can comprise a primary key.
-//
-// Keys are easy to construct. For example, suppose you have a table with a
-// primary key of username and product ID. To make a key for this table:
-//
-// key := spanner.Key{"john", 16}
-//
-// See the description of Row and Mutation types for how Go types are mapped to
-// Cloud Spanner types. For convenience, Key type supports a wide range of Go
-// types:
-// - int, int8, int16, int32, int64, and NullInt64 are mapped to Cloud Spanner's INT64 type.
-// - uint8, uint16 and uint32 are also mapped to Cloud Spanner's INT64 type.
-// - float32, float64, NullFloat64 are mapped to Cloud Spanner's FLOAT64 type.
-// - bool and NullBool are mapped to Cloud Spanner's BOOL type.
-// - []byte is mapped to Cloud Spanner's BYTES type.
-// - string and NullString are mapped to Cloud Spanner's STRING type.
-// - time.Time and NullTime are mapped to Cloud Spanner's TIMESTAMP type.
-// - civil.Date and NullDate are mapped to Cloud Spanner's DATE type.
-// - protoreflect.Enum and NullProtoEnum are mapped to Cloud Spanner's ENUM type.
-type Key []interface{}
-
-// errInvdKeyPartType returns error for unsupported key part type.
-func errInvdKeyPartType(part interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "key part has unsupported type %T", part)
-}
-
-// keyPartValue converts a part of the Key (which is a valid Cloud Spanner type)
-// into a proto3.Value. Used for encoding Key type into protobuf.
-func keyPartValue(part interface{}) (pb *proto3.Value, err error) {
- switch v := part.(type) {
- case int:
- pb, _, err = encodeValue(int64(v))
- case int8:
- pb, _, err = encodeValue(int64(v))
- case int16:
- pb, _, err = encodeValue(int64(v))
- case int32:
- pb, _, err = encodeValue(int64(v))
- case uint8:
- pb, _, err = encodeValue(int64(v))
- case uint16:
- pb, _, err = encodeValue(int64(v))
- case uint32:
- pb, _, err = encodeValue(int64(v))
- case int64, float64, float32, NullInt64, NullFloat64, NullFloat32, bool, NullBool, []byte, string, NullString, time.Time, civil.Date, NullTime, NullDate, big.Rat, NullNumeric, protoreflect.Enum, NullProtoEnum:
- pb, _, err = encodeValue(v)
- case Encoder:
- part, err = v.EncodeSpanner()
- if err != nil {
- return nil, err
- }
- pb, err = keyPartValue(part)
- default:
- return nil, errInvdKeyPartType(v)
- }
- return pb, err
-}
-
-// proto converts a spanner.Key into a proto3.ListValue.
-func (key Key) proto() (*proto3.ListValue, error) {
- lv := &proto3.ListValue{}
- lv.Values = make([]*proto3.Value, 0, len(key))
- for _, part := range key {
- v, err := keyPartValue(part)
- if err != nil {
- return nil, err
- }
- lv.Values = append(lv.Values, v)
- }
- return lv, nil
-}
-
-// keySetProto lets a single Key act as a KeySet.
-func (key Key) keySetProto() (*sppb.KeySet, error) {
- kp, err := key.proto()
- if err != nil {
- return nil, err
- }
- return &sppb.KeySet{Keys: []*proto3.ListValue{kp}}, nil
-}
-
-// String implements fmt.Stringer for Key. For string, []byte and NullString, it
-// prints the uninterpreted bytes of their contents, leaving caller with the
-// opportunity to escape the output.
-func (key Key) String() string {
- b := &bytes.Buffer{}
- fmt.Fprint(b, "(")
- for i, part := range []interface{}(key) {
- if i != 0 {
- fmt.Fprint(b, ",")
- }
- key.elemString(b, part)
- }
- fmt.Fprint(b, ")")
- return b.String()
-}
-
-func (key Key) elemString(b *bytes.Buffer, part interface{}) {
- switch v := part.(type) {
- case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool, protoreflect.Enum:
- // Use %v to print numeric types and bool.
- fmt.Fprintf(b, "%v", v)
- case string:
- fmt.Fprintf(b, "%q", v)
- case []byte:
- if v != nil {
- fmt.Fprintf(b, "%q", v)
- } else {
- fmt.Fprint(b, nullString)
- }
- case NullInt64, NullFloat64, NullBool, NullNumeric, NullProtoEnum:
- // The above types implement fmt.Stringer.
- fmt.Fprintf(b, "%s", v)
- case NullString, NullDate, NullTime:
- // Quote the returned string if it is not null.
- if v.(NullableValue).IsNull() {
- fmt.Fprintf(b, "%s", nullString)
- } else {
- fmt.Fprintf(b, "%q", v)
- }
- case civil.Date:
- fmt.Fprintf(b, "%q", v)
- case time.Time:
- fmt.Fprintf(b, "%q", v.Format(time.RFC3339Nano))
- case big.Rat:
- fmt.Fprintf(b, "%v", NumericString(&v))
- case Encoder:
- var err error
- part, err = v.EncodeSpanner()
- if err != nil {
- fmt.Fprintf(b, "error")
- } else {
- key.elemString(b, part)
- }
- default:
- fmt.Fprintf(b, "%v", v)
- }
-}
-
-// AsPrefix returns a KeyRange for all keys where k is the prefix.
-func (key Key) AsPrefix() KeyRange {
- return KeyRange{
- Start: key,
- End: key,
- Kind: ClosedClosed,
- }
-}
-
-// KeyRangeKind describes the kind of interval represented by a KeyRange:
-// whether it is open or closed on the left and right.
-type KeyRangeKind int
-
-const (
- // ClosedOpen is closed on the left and open on the right: the Start
- // key is included, the End key is excluded.
- ClosedOpen KeyRangeKind = iota
-
- // ClosedClosed is closed on the left and the right: both keys are included.
- ClosedClosed
-
- // OpenClosed is open on the left and closed on the right: the Start
- // key is excluded, the End key is included.
- OpenClosed
-
- // OpenOpen is open on the left and the right: neither key is included.
- OpenOpen
-)
-
-// A KeyRange represents a range of rows in a table or index.
-//
-// A range has a Start key and an End key. IncludeStart and IncludeEnd
-// indicate whether the Start and End keys are included in the range.
-//
-// For example, consider the following table definition:
-//
-// CREATE TABLE UserEvents (
-// UserName STRING(MAX),
-// EventDate STRING(10),
-// ) PRIMARY KEY(UserName, EventDate);
-//
-// The following keys name rows in this table:
-//
-// spanner.Key{"Bob", "2014-09-23"}
-// spanner.Key{"Alfred", "2015-06-12"}
-//
-// Since the UserEvents table's PRIMARY KEY clause names two columns, each
-// UserEvents key has two elements; the first is the UserName, and the second
-// is the EventDate.
-//
-// Key ranges with multiple components are interpreted lexicographically by
-// component using the table or index key's declared sort order. For example,
-// the following range returns all events for user "Bob" that occurred in the
-// year 2015:
-//
-// spanner.KeyRange{
-// Start: spanner.Key{"Bob", "2015-01-01"},
-// End: spanner.Key{"Bob", "2015-12-31"},
-// Kind: ClosedClosed,
-// }
-//
-// Start and end keys can omit trailing key components. This affects the
-// inclusion and exclusion of rows that exactly match the provided key
-// components: if IncludeStart is true, then rows that exactly match the
-// provided components of the Start key are included; if IncludeStart is false
-// then rows that exactly match are not included. IncludeEnd and End key
-// behave in the same fashion.
-//
-// For example, the following range includes all events for "Bob" that occurred
-// during and after the year 2000:
-//
-// spanner.KeyRange{
-// Start: spanner.Key{"Bob", "2000-01-01"},
-// End: spanner.Key{"Bob"},
-// Kind: ClosedClosed,
-// }
-//
-// The next example retrieves all events for "Bob":
-//
-// spanner.Key{"Bob"}.AsPrefix()
-//
-// To retrieve events before the year 2000:
-//
-// spanner.KeyRange{
-// Start: spanner.Key{"Bob"},
-// End: spanner.Key{"Bob", "2000-01-01"},
-// Kind: ClosedOpen,
-// }
-//
-// Although we specified a Kind for this KeyRange, we didn't need to, because
-// the default is ClosedOpen. In later examples we'll omit Kind if it is
-// ClosedOpen.
-//
-// The following range includes all rows in a table or under a
-// index:
-//
-// spanner.AllKeys()
-//
-// This range returns all users whose UserName begins with any
-// character from A to C:
-//
-// spanner.KeyRange{
-// Start: spanner.Key{"A"},
-// End: spanner.Key{"D"},
-// }
-//
-// This range returns all users whose UserName begins with B:
-//
-// spanner.KeyRange{
-// Start: spanner.Key{"B"},
-// End: spanner.Key{"C"},
-// }
-//
-// Key ranges honor column sort order. For example, suppose a table is defined
-// as follows:
-//
-// CREATE TABLE DescendingSortedTable {
-// Key INT64,
-// ...
-// ) PRIMARY KEY(Key DESC);
-//
-// The following range retrieves all rows with key values between 1 and 100
-// inclusive:
-//
-// spanner.KeyRange{
-// Start: spanner.Key{100},
-// End: spanner.Key{1},
-// Kind: ClosedClosed,
-// }
-//
-// Note that 100 is passed as the start, and 1 is passed as the end, because
-// Key is a descending column in the schema.
-type KeyRange struct {
- // Start specifies the left boundary of the key range; End specifies
- // the right boundary of the key range.
- Start, End Key
-
- // Kind describes whether the boundaries of the key range include
- // their keys.
- Kind KeyRangeKind
-}
-
-// String implements fmt.Stringer for KeyRange type.
-func (r KeyRange) String() string {
- var left, right string
- switch r.Kind {
- case ClosedClosed:
- left, right = "[", "]"
- case ClosedOpen:
- left, right = "[", ")"
- case OpenClosed:
- left, right = "(", "]"
- case OpenOpen:
- left, right = "(", ")"
- default:
- left, right = "?", "?"
- }
- return fmt.Sprintf("%s%s,%s%s", left, r.Start, r.End, right)
-}
-
-// proto converts KeyRange into sppb.KeyRange.
-func (r KeyRange) proto() (*sppb.KeyRange, error) {
- var err error
- var start, end *proto3.ListValue
- pb := &sppb.KeyRange{}
- if start, err = r.Start.proto(); err != nil {
- return nil, err
- }
- if end, err = r.End.proto(); err != nil {
- return nil, err
- }
- if r.Kind == ClosedClosed || r.Kind == ClosedOpen {
- pb.StartKeyType = &sppb.KeyRange_StartClosed{StartClosed: start}
- } else {
- pb.StartKeyType = &sppb.KeyRange_StartOpen{StartOpen: start}
- }
- if r.Kind == ClosedClosed || r.Kind == OpenClosed {
- pb.EndKeyType = &sppb.KeyRange_EndClosed{EndClosed: end}
- } else {
- pb.EndKeyType = &sppb.KeyRange_EndOpen{EndOpen: end}
- }
- return pb, nil
-}
-
-// keySetProto lets a KeyRange act as a KeySet.
-func (r KeyRange) keySetProto() (*sppb.KeySet, error) {
- rp, err := r.proto()
- if err != nil {
- return nil, err
- }
- return &sppb.KeySet{Ranges: []*sppb.KeyRange{rp}}, nil
-}
-
-// A KeySet defines a collection of Cloud Spanner keys and/or key ranges. All
-// the keys are expected to be in the same table or index. The keys need not be
-// sorted in any particular way.
-//
-// An individual Key can act as a KeySet, as can a KeyRange. Use the KeySets
-// function to create a KeySet consisting of multiple Keys and KeyRanges. To
-// obtain an empty KeySet, call KeySets with no arguments.
-//
-// If the same key is specified multiple times in the set (for example if two
-// ranges, two keys, or a key and a range overlap), the Cloud Spanner backend
-// behaves as if the key were only specified once.
-type KeySet interface {
- keySetProto() (*sppb.KeySet, error)
-}
-
-// AllKeys returns a KeySet that represents all Keys of a table or a index.
-func AllKeys() KeySet {
- return all{}
-}
-
-type all struct{}
-
-func (all) keySetProto() (*sppb.KeySet, error) {
- return &sppb.KeySet{All: true}, nil
-}
-
-// KeySets returns the union of the KeySets. If any of the KeySets is AllKeys,
-// then the resulting KeySet will be equivalent to AllKeys.
-func KeySets(keySets ...KeySet) KeySet {
- u := make(union, len(keySets))
- copy(u, keySets)
- return u
-}
-
-// KeySetFromKeys returns a KeySet containing the given slice of keys.
-func KeySetFromKeys(keys ...Key) KeySet {
- u := make(union, len(keys))
- for i, k := range keys {
- u[i] = k
- }
- return u
-}
-
-type union []KeySet
-
-func (u union) keySetProto() (*sppb.KeySet, error) {
- upb := &sppb.KeySet{}
- for _, ks := range u {
- pb, err := ks.keySetProto()
- if err != nil {
- return nil, err
- }
- if pb.All {
- return pb, nil
- }
- upb.Keys = append(upb.Keys, pb.Keys...)
- upb.Ranges = append(upb.Ranges, pb.Ranges...)
- }
- return upb, nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/mutation.go b/vendor/cloud.google.com/go/spanner/mutation.go
deleted file mode 100644
index b9909742d..000000000
--- a/vendor/cloud.google.com/go/spanner/mutation.go
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "reflect"
-
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/grpc/codes"
- proto3 "google.golang.org/protobuf/types/known/structpb"
-)
-
-// op is the mutation operation.
-type op int
-
-const (
- // opDelete removes a row from a table. Succeeds whether or not the
- // key was present.
- opDelete op = iota
- // opInsert inserts a row into a table. If the row already exists, the
- // write or transaction fails.
- opInsert
- // opInsertOrUpdate inserts a row into a table. If the row already
- // exists, it updates it instead. Any column values not explicitly
- // written are preserved.
- opInsertOrUpdate
- // opReplace inserts a row into a table, deleting any existing row.
- // Unlike InsertOrUpdate, this means any values not explicitly written
- // become NULL.
- opReplace
- // opUpdate updates a row in a table. If the row does not already
- // exist, the write or transaction fails.
- opUpdate
-)
-
-// A Mutation describes a modification to one or more Cloud Spanner rows. The
-// mutation represents an insert, update, delete, etc on a table.
-//
-// Many mutations can be applied in a single atomic commit. For purposes of
-// constraint checking (such as foreign key constraints), the operations can be
-// viewed as applying in the same order as the mutations are provided (so that,
-// e.g., a row and its logical "child" can be inserted in the same commit).
-//
-// The Apply function applies series of mutations. For example,
-//
-// m := spanner.Insert("User",
-// []string{"user_id", "profile"},
-// []interface{}{UserID, profile})
-// _, err := client.Apply(ctx, []*spanner.Mutation{m})
-//
-// inserts a new row into the User table. The primary key
-// for the new row is UserID (presuming that "user_id" has been declared as the
-// primary key of the "User" table).
-//
-// To apply a series of mutations as part of an atomic read-modify-write
-// operation, use ReadWriteTransaction.
-//
-// # Updating a row
-//
-// Changing the values of columns in an existing row is very similar to
-// inserting a new row:
-//
-// m := spanner.Update("User",
-// []string{"user_id", "profile"},
-// []interface{}{UserID, profile})
-// _, err := client.Apply(ctx, []*spanner.Mutation{m})
-//
-// # Deleting a row
-//
-// To delete a row, use spanner.Delete:
-//
-// m := spanner.Delete("User", spanner.Key{UserId})
-// _, err := client.Apply(ctx, []*spanner.Mutation{m})
-//
-// spanner.Delete accepts a KeySet, so you can also pass in a KeyRange, or use
-// the spanner.KeySets function to build any combination of Keys and KeyRanges.
-//
-// Note that deleting a row in a table may also delete rows from other tables
-// if cascading deletes are specified in those tables' schemas. Delete does
-// nothing if the named row does not exist (does not yield an error).
-//
-// # Deleting a field
-//
-// To delete/clear a field within a row, use spanner.Update with the value nil:
-//
-// m := spanner.Update("User",
-// []string{"user_id", "profile"},
-// []interface{}{UserID, nil})
-// _, err := client.Apply(ctx, []*spanner.Mutation{m})
-//
-// The valid Go types and their corresponding Cloud Spanner types that can be
-// used in the Insert/Update/InsertOrUpdate functions are:
-//
-// string, *string, NullString - STRING
-// []string, []*string, []NullString - STRING ARRAY
-// []byte - BYTES
-// [][]byte - BYTES ARRAY
-// int, int64, *int64, NullInt64 - INT64
-// []int, []int64, []*int64, []NullInt64 - INT64 ARRAY
-// bool, *bool, NullBool - BOOL
-// []bool, []*bool, []NullBool - BOOL ARRAY
-// float64, *float64, NullFloat64 - FLOAT64
-// []float64, []*float64, []NullFloat64 - FLOAT64 ARRAY
-// time.Time, *time.Time, NullTime - TIMESTAMP
-// []time.Time, []*time.Time, []NullTime - TIMESTAMP ARRAY
-// Date, *Date, NullDate - DATE
-// []Date, []*Date, []NullDate - DATE ARRAY
-// big.Rat, *big.Rat, NullNumeric - NUMERIC
-// []big.Rat, []*big.Rat, []NullNumeric - NUMERIC ARRAY
-//
-// To compare two Mutations for testing purposes, use reflect.DeepEqual.
-type Mutation struct {
- // op is the operation type of the mutation.
- // See documentation for spanner.op for more details.
- op op
- // Table is the name of the target table to be modified.
- table string
- // keySet is a set of primary keys that names the rows
- // in a delete operation.
- keySet KeySet
- // columns names the set of columns that are going to be
- // modified by Insert, InsertOrUpdate, Replace or Update
- // operations.
- columns []string
- // values specifies the new values for the target columns
- // named by Columns.
- values []interface{}
-}
-
-// A MutationGroup is a list of Mutation to be committed atomically.
-type MutationGroup struct {
- // The Mutations in this group
- Mutations []*Mutation
-}
-
-// mapToMutationParams converts Go map into mutation parameters.
-func mapToMutationParams(in map[string]interface{}) ([]string, []interface{}) {
- cols := []string{}
- vals := []interface{}{}
- for k, v := range in {
- cols = append(cols, k)
- vals = append(vals, v)
- }
- return cols, vals
-}
-
-// errNotStruct returns error for not getting a go struct type.
-func errNotStruct(in interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "%T is not a go struct type", in)
-}
-
-// structToMutationParams converts Go struct into mutation parameters.
-// If the input is not a valid Go struct type, structToMutationParams
-// returns error.
-func structToMutationParams(in interface{}) ([]string, []interface{}, error) {
- if in == nil {
- return nil, nil, errNotStruct(in)
- }
- v := reflect.ValueOf(in)
- t := v.Type()
- if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
- // t is a pointer to a struct.
- if v.IsNil() {
- // Return empty results.
- return nil, nil, nil
- }
- // Get the struct value that in points to.
- v = v.Elem()
- t = t.Elem()
- }
- if t.Kind() != reflect.Struct {
- return nil, nil, errNotStruct(in)
- }
- fields, err := fieldCache.Fields(t)
- if err != nil {
- return nil, nil, ToSpannerError(err)
- }
- var cols []string
- var vals []interface{}
- for _, f := range fields {
- cols = append(cols, f.Name)
- vals = append(vals, v.FieldByIndex(f.Index).Interface())
- }
- return cols, vals, nil
-}
-
-// Insert returns a Mutation to insert a row into a table. If the row already
-// exists, the write or transaction fails with codes.AlreadyExists.
-func Insert(table string, cols []string, vals []interface{}) *Mutation {
- return &Mutation{
- op: opInsert,
- table: table,
- columns: cols,
- values: vals,
- }
-}
-
-// InsertMap returns a Mutation to insert a row into a table, specified by
-// a map of column name to value. If the row already exists, the write or
-// transaction fails with codes.AlreadyExists.
-func InsertMap(table string, in map[string]interface{}) *Mutation {
- cols, vals := mapToMutationParams(in)
- return Insert(table, cols, vals)
-}
-
-// InsertStruct returns a Mutation to insert a row into a table, specified by
-// a Go struct. If the row already exists, the write or transaction fails with
-// codes.AlreadyExists.
-//
-// The in argument must be a struct or a pointer to a struct. Its exported
-// fields specify the column names and values. Use a field tag like `spanner:"name"`
-// to provide an alternative column name, or use `spanner:"-"` to ignore the field.
-func InsertStruct(table string, in interface{}) (*Mutation, error) {
- cols, vals, err := structToMutationParams(in)
- if err != nil {
- return nil, err
- }
- return Insert(table, cols, vals), nil
-}
-
-// Update returns a Mutation to update a row in a table. If the row does not
-// already exist, the write or transaction fails.
-func Update(table string, cols []string, vals []interface{}) *Mutation {
- return &Mutation{
- op: opUpdate,
- table: table,
- columns: cols,
- values: vals,
- }
-}
-
-// UpdateMap returns a Mutation to update a row in a table, specified by
-// a map of column to value. If the row does not already exist, the write or
-// transaction fails.
-func UpdateMap(table string, in map[string]interface{}) *Mutation {
- cols, vals := mapToMutationParams(in)
- return Update(table, cols, vals)
-}
-
-// UpdateStruct returns a Mutation to update a row in a table, specified by a Go
-// struct. If the row does not already exist, the write or transaction fails.
-func UpdateStruct(table string, in interface{}) (*Mutation, error) {
- cols, vals, err := structToMutationParams(in)
- if err != nil {
- return nil, err
- }
- return Update(table, cols, vals), nil
-}
-
-// InsertOrUpdate returns a Mutation to insert a row into a table. If the row
-// already exists, it updates it instead. Any column values not explicitly
-// written are preserved.
-//
-// For a similar example, See Update.
-func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation {
- return &Mutation{
- op: opInsertOrUpdate,
- table: table,
- columns: cols,
- values: vals,
- }
-}
-
-// InsertOrUpdateMap returns a Mutation to insert a row into a table,
-// specified by a map of column to value. If the row already exists, it
-// updates it instead. Any column values not explicitly written are preserved.
-//
-// For a similar example, See UpdateMap.
-func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation {
- cols, vals := mapToMutationParams(in)
- return InsertOrUpdate(table, cols, vals)
-}
-
-// InsertOrUpdateStruct returns a Mutation to insert a row into a table,
-// specified by a Go struct. If the row already exists, it updates it instead.
-// Any column values not explicitly written are preserved.
-//
-// The in argument must be a struct or a pointer to a struct. Its exported
-// fields specify the column names and values. Use a field tag like
-// `spanner:"name"` to provide an alternative column name, or use `spanner:"-"` to
-// ignore the field.
-//
-// For a similar example, See UpdateStruct.
-func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error) {
- cols, vals, err := structToMutationParams(in)
- if err != nil {
- return nil, err
- }
- return InsertOrUpdate(table, cols, vals), nil
-}
-
-// Replace returns a Mutation to insert a row into a table, deleting any
-// existing row. Unlike InsertOrUpdate, this means any values not explicitly
-// written become NULL.
-//
-// For a similar example, See Update.
-func Replace(table string, cols []string, vals []interface{}) *Mutation {
- return &Mutation{
- op: opReplace,
- table: table,
- columns: cols,
- values: vals,
- }
-}
-
-// ReplaceMap returns a Mutation to insert a row into a table, deleting any
-// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly
-// written become NULL. The row is specified by a map of column to value.
-//
-// For a similar example, See UpdateMap.
-func ReplaceMap(table string, in map[string]interface{}) *Mutation {
- cols, vals := mapToMutationParams(in)
- return Replace(table, cols, vals)
-}
-
-// ReplaceStruct returns a Mutation to insert a row into a table, deleting any
-// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly
-// written become NULL. The row is specified by a Go struct.
-//
-// The in argument must be a struct or a pointer to a struct. Its exported
-// fields specify the column names and values. Use a field tag like `spanner:"name"`
-// to provide an alternative column name, or use `spanner:"-"` to ignore the field.
-//
-// For a similar example, See UpdateStruct.
-func ReplaceStruct(table string, in interface{}) (*Mutation, error) {
- cols, vals, err := structToMutationParams(in)
- if err != nil {
- return nil, err
- }
- return Replace(table, cols, vals), nil
-}
-
-// Delete removes the rows described by the KeySet from the table. It succeeds
-// whether or not the keys were present.
-func Delete(table string, ks KeySet) *Mutation {
- return &Mutation{
- op: opDelete,
- table: table,
- keySet: ks,
- }
-}
-
-// prepareWrite generates sppb.Mutation_Write from table name, column names
-// and new column values.
-func prepareWrite(table string, columns []string, vals []interface{}) (*sppb.Mutation_Write, error) {
- v, err := encodeValueArray(vals)
- if err != nil {
- return nil, err
- }
- return &sppb.Mutation_Write{
- Table: table,
- Columns: columns,
- Values: []*proto3.ListValue{v},
- }, nil
-}
-
-// errInvdMutationOp returns error for unrecognized mutation operation.
-func errInvdMutationOp(m Mutation) error {
- return spannerErrorf(codes.InvalidArgument, "Unknown op type: %d", m.op)
-}
-
-// proto converts spanner.Mutation to sppb.Mutation, in preparation to send
-// RPCs.
-func (m Mutation) proto() (*sppb.Mutation, error) {
- var pb *sppb.Mutation
- switch m.op {
- case opDelete:
- var kp *sppb.KeySet
- if m.keySet != nil {
- var err error
- kp, err = m.keySet.keySetProto()
- if err != nil {
- return nil, err
- }
- }
- pb = &sppb.Mutation{
- Operation: &sppb.Mutation_Delete_{
- Delete: &sppb.Mutation_Delete{
- Table: m.table,
- KeySet: kp,
- },
- },
- }
- case opInsert:
- w, err := prepareWrite(m.table, m.columns, m.values)
- if err != nil {
- return nil, err
- }
- pb = &sppb.Mutation{Operation: &sppb.Mutation_Insert{Insert: w}}
- case opInsertOrUpdate:
- w, err := prepareWrite(m.table, m.columns, m.values)
- if err != nil {
- return nil, err
- }
- pb = &sppb.Mutation{Operation: &sppb.Mutation_InsertOrUpdate{InsertOrUpdate: w}}
- case opReplace:
- w, err := prepareWrite(m.table, m.columns, m.values)
- if err != nil {
- return nil, err
- }
- pb = &sppb.Mutation{Operation: &sppb.Mutation_Replace{Replace: w}}
- case opUpdate:
- w, err := prepareWrite(m.table, m.columns, m.values)
- if err != nil {
- return nil, err
- }
- pb = &sppb.Mutation{Operation: &sppb.Mutation_Update{Update: w}}
- default:
- return nil, errInvdMutationOp(m)
- }
- return pb, nil
-}
-
-// mutationsProto turns a spanner.Mutation array into a sppb.Mutation array,
-// it is convenient for sending batch mutations to Cloud Spanner.
-func mutationsProto(ms []*Mutation) ([]*sppb.Mutation, error) {
- l := make([]*sppb.Mutation, 0, len(ms))
- for _, m := range ms {
- pb, err := m.proto()
- if err != nil {
- return nil, err
- }
- l = append(l, pb)
- }
- return l, nil
-}
-
-// mutationGroupsProto turns a spanner.MutationGroup array into a
-// sppb.BatchWriteRequest_MutationGroup array, in preparation to send RPCs.
-func mutationGroupsProto(mgs []*MutationGroup) ([]*sppb.BatchWriteRequest_MutationGroup, error) {
- gs := make([]*sppb.BatchWriteRequest_MutationGroup, 0, len(mgs))
- for _, mg := range mgs {
- ms, err := mutationsProto(mg.Mutations)
- if err != nil {
- return nil, err
- }
- gs = append(gs, &sppb.BatchWriteRequest_MutationGroup{Mutations: ms})
- }
- return gs, nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/ot_metrics.go b/vendor/cloud.google.com/go/spanner/ot_metrics.go
deleted file mode 100644
index 16190860c..000000000
--- a/vendor/cloud.google.com/go/spanner/ot_metrics.go
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanner
-
-import (
- "context"
- "log"
- "strconv"
- "strings"
- "sync"
-
- "cloud.google.com/go/spanner/internal"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "google.golang.org/grpc/metadata"
-)
-
-// OtInstrumentationScope is the instrumentation name that will be associated with the emitted telemetry.
-const OtInstrumentationScope = "cloud.google.com/go"
-const metricsPrefix = "spanner/"
-
-var (
- attributeKeyClientID = attribute.Key("client_id")
- attributeKeyDatabase = attribute.Key("database")
- attributeKeyInstance = attribute.Key("instance_id")
- attributeKeyLibVersion = attribute.Key("library_version")
- attributeKeyType = attribute.Key("type")
- attributeKeyMethod = attribute.Key("grpc_client_method")
- attributeKeyIsMultiplexed = attribute.Key("is_multiplexed")
-
- attributeNumInUseSessions = attributeKeyType.String("num_in_use_sessions")
- attributeNumSessions = attributeKeyType.String("num_sessions")
- // openTelemetryMetricsEnabled is used to track if OpenTelemetry Metrics need to be recorded
- openTelemetryMetricsEnabled = false
- // mutex to avoid data race in reading/writing the above flag
- otMu = sync.RWMutex{}
-)
-
-func createOpenTelemetryConfig(mp metric.MeterProvider, logger *log.Logger, sessionClientID string, db string) (*openTelemetryConfig, error) {
- config := &openTelemetryConfig{
- attributeMap: []attribute.KeyValue{},
- }
- if !IsOpenTelemetryMetricsEnabled() {
- return config, nil
- }
- _, instance, database, err := parseDatabaseName(db)
- if err != nil {
- return nil, err
- }
-
- // Construct attributes for Metrics
- attributeMap := []attribute.KeyValue{
- attributeKeyClientID.String(sessionClientID),
- attributeKeyDatabase.String(database),
- attributeKeyInstance.String(instance),
- attributeKeyLibVersion.String(internal.Version),
- }
- config.attributeMap = append(config.attributeMap, attributeMap...)
-
- config.attributeMapWithMultiplexed = append(config.attributeMapWithMultiplexed, attributeMap...)
- config.attributeMapWithMultiplexed = append(config.attributeMapWithMultiplexed, attributeKeyIsMultiplexed.String("true"))
-
- config.attributeMapWithoutMultiplexed = append(config.attributeMapWithoutMultiplexed, attributeMap...)
- config.attributeMapWithoutMultiplexed = append(config.attributeMapWithoutMultiplexed, attributeKeyIsMultiplexed.String("false"))
-
- setOpenTelemetryMetricProvider(config, mp, logger)
- return config, nil
-}
-
-func setOpenTelemetryMetricProvider(config *openTelemetryConfig, mp metric.MeterProvider, logger *log.Logger) {
- // Fallback to global meter provider in OpenTelemetry
- if mp == nil {
- mp = otel.GetMeterProvider()
- }
- config.meterProvider = mp
- initializeMetricInstruments(config, logger)
-}
-
-func initializeMetricInstruments(config *openTelemetryConfig, logger *log.Logger) {
- if !IsOpenTelemetryMetricsEnabled() {
- return
- }
- meter := config.meterProvider.Meter(OtInstrumentationScope, metric.WithInstrumentationVersion(internal.Version))
-
- openSessionCountInstrument, err := meter.Int64ObservableGauge(
- metricsPrefix+"open_session_count",
- metric.WithDescription("Number of sessions currently opened"),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/open_session_count, error: %v", err)
- }
- config.openSessionCount = openSessionCountInstrument
-
- maxAllowedSessionsCountInstrument, err := meter.Int64ObservableGauge(
- metricsPrefix+"max_allowed_sessions",
- metric.WithDescription("The maximum number of sessions allowed. Configurable by the user."),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/max_allowed_sessions, error: %v", err)
- }
- config.maxAllowedSessionsCount = maxAllowedSessionsCountInstrument
-
- sessionsCountInstrument, _ := meter.Int64ObservableGauge(
- metricsPrefix+"num_sessions_in_pool",
- metric.WithDescription("The number of sessions currently in use."),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/num_sessions_in_pool, error: %v", err)
- }
- config.sessionsCount = sessionsCountInstrument
-
- maxInUseSessionsCountInstrument, err := meter.Int64ObservableGauge(
- metricsPrefix+"max_in_use_sessions",
- metric.WithDescription("The maximum number of sessions in use during the last 10 minute interval."),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/max_in_use_sessions, error: %v", err)
- }
- config.maxInUseSessionsCount = maxInUseSessionsCountInstrument
-
- getSessionTimeoutsCountInstrument, err := meter.Int64Counter(
- metricsPrefix+"get_session_timeouts",
- metric.WithDescription("The number of get sessions timeouts due to pool exhaustion."),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/get_session_timeouts, error: %v", err)
- }
- config.getSessionTimeoutsCount = getSessionTimeoutsCountInstrument
-
- acquiredSessionsCountInstrument, err := meter.Int64Counter(
- metricsPrefix+"num_acquired_sessions",
- metric.WithDescription("The number of sessions acquired from the session pool."),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/num_acquired_sessions, error: %v", err)
- }
- config.acquiredSessionsCount = acquiredSessionsCountInstrument
-
- releasedSessionsCountInstrument, err := meter.Int64Counter(
- metricsPrefix+"num_released_sessions",
- metric.WithDescription("The number of sessions released by the user and pool maintainer."),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/num_released_sessions, error: %v", err)
- }
- config.releasedSessionsCount = releasedSessionsCountInstrument
-
- gfeLatencyInstrument, err := meter.Int64Histogram(
- metricsPrefix+"gfe_latency",
- metric.WithDescription("Latency between Google's network receiving an RPC and reading back the first byte of the response"),
- metric.WithUnit("ms"),
- metric.WithExplicitBucketBoundaries(0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0,
- 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0,
- 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0,
- 100000.0),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/gfe_latency, error: %v", err)
- }
- config.gfeLatency = gfeLatencyInstrument
-
- gfeHeaderMissingCountInstrument, err := meter.Int64Counter(
- metricsPrefix+"gfe_header_missing_count",
- metric.WithDescription("Number of RPC responses received without the server-timing header, most likely means that the RPC never reached Google's network"),
- metric.WithUnit("1"),
- )
- if err != nil {
- logf(logger, "Error during registering instrument for metric spanner/gfe_header_missing_count, error: %v", err)
- }
- config.gfeHeaderMissingCount = gfeHeaderMissingCountInstrument
-}
-
-func registerSessionPoolOTMetrics(pool *sessionPool) error {
- otConfig := pool.otConfig
- if !IsOpenTelemetryMetricsEnabled() || otConfig == nil {
- return nil
- }
-
- attributes := otConfig.attributeMap
- attributesInUseSessions := append(attributes, attributeNumInUseSessions)
- attributesAvailableSessions := append(attributes, attributeNumSessions)
-
- reg, err := otConfig.meterProvider.Meter(OtInstrumentationScope, metric.WithInstrumentationVersion(internal.Version)).RegisterCallback(
- func(ctx context.Context, o metric.Observer) error {
- pool.mu.Lock()
- defer pool.mu.Unlock()
- if pool.multiplexedSession != nil {
- o.ObserveInt64(otConfig.openSessionCount, int64(1), metric.WithAttributes(otConfig.attributeMapWithMultiplexed...))
- }
- o.ObserveInt64(otConfig.openSessionCount, int64(pool.numOpened), metric.WithAttributes(attributes...))
- o.ObserveInt64(otConfig.maxAllowedSessionsCount, int64(pool.SessionPoolConfig.MaxOpened), metric.WithAttributes(attributes...))
- o.ObserveInt64(otConfig.sessionsCount, int64(pool.numInUse), metric.WithAttributes(append(attributesInUseSessions, attribute.Key("is_multiplexed").String("false"))...))
- o.ObserveInt64(otConfig.sessionsCount, int64(pool.numSessions), metric.WithAttributes(attributesAvailableSessions...))
- o.ObserveInt64(otConfig.maxInUseSessionsCount, int64(pool.maxNumInUse), metric.WithAttributes(append(attributes, attribute.Key("is_multiplexed").String("false"))...))
- return nil
- },
- otConfig.openSessionCount,
- otConfig.maxAllowedSessionsCount,
- otConfig.sessionsCount,
- otConfig.maxInUseSessionsCount,
- )
- pool.otConfig.otMetricRegistration = reg
- return err
-}
-
-// EnableOpenTelemetryMetrics enables OpenTelemetery metrics
-func EnableOpenTelemetryMetrics() {
- setOpenTelemetryMetricsFlag(true)
-}
-
-// IsOpenTelemetryMetricsEnabled tells whether OpenTelemtery metrics is enabled or not.
-func IsOpenTelemetryMetricsEnabled() bool {
- otMu.RLock()
- defer otMu.RUnlock()
- return openTelemetryMetricsEnabled
-}
-
-func setOpenTelemetryMetricsFlag(enable bool) {
- otMu.Lock()
- openTelemetryMetricsEnabled = enable
- otMu.Unlock()
-}
-
-func recordGFELatencyMetricsOT(ctx context.Context, md metadata.MD, keyMethod string, otConfig *openTelemetryConfig) error {
- if !IsOpenTelemetryMetricsEnabled() || md == nil && otConfig == nil {
- return nil
- }
- attr := otConfig.attributeMap
- if len(md.Get("server-timing")) == 0 && otConfig.gfeHeaderMissingCount != nil {
- otConfig.gfeHeaderMissingCount.Add(ctx, 1, metric.WithAttributes(attr...))
- return nil
- }
- serverTiming := md.Get("server-timing")[0]
- gfeLatency, err := strconv.Atoi(strings.TrimPrefix(serverTiming, "gfet4t7; dur="))
- if !strings.HasPrefix(serverTiming, "gfet4t7; dur=") || err != nil {
- return err
- }
- attr = append(attr, attributeKeyMethod.String(keyMethod))
- if otConfig.gfeLatency != nil {
- otConfig.gfeLatency.Record(ctx, int64(gfeLatency), metric.WithAttributes(attr...))
- }
- return nil
-}
diff --git a/vendor/cloud.google.com/go/spanner/pdml.go b/vendor/cloud.google.com/go/spanner/pdml.go
deleted file mode 100644
index bb33ef291..000000000
--- a/vendor/cloud.google.com/go/spanner/pdml.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2018 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanner
-
-import (
- "context"
-
- "cloud.google.com/go/internal/trace"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "github.com/googleapis/gax-go/v2"
- "go.opencensus.io/tag"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
-)
-
-// PartitionedUpdate executes a DML statement in parallel across the database,
-// using separate, internal transactions that commit independently. The DML
-// statement must be fully partitionable: it must be expressible as the union
-// of many statements each of which accesses only a single row of the table. The
-// statement should also be idempotent, because it may be applied more than once.
-//
-// PartitionedUpdate returns an estimated count of the number of rows affected.
-// The actual number of affected rows may be greater than the estimate.
-func (c *Client) PartitionedUpdate(ctx context.Context, statement Statement) (count int64, err error) {
- return c.partitionedUpdate(ctx, statement, c.qo)
-}
-
-// PartitionedUpdateWithOptions executes a DML statement in parallel across the database,
-// using separate, internal transactions that commit independently. The sql
-// query execution will be optimized based on the given query options.
-func (c *Client) PartitionedUpdateWithOptions(ctx context.Context, statement Statement, opts QueryOptions) (count int64, err error) {
- return c.partitionedUpdate(ctx, statement, c.qo.merge(opts))
-}
-
-func (c *Client) partitionedUpdate(ctx context.Context, statement Statement, options QueryOptions) (count int64, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.PartitionedUpdate")
- defer func() { trace.EndSpan(ctx, err) }()
- if err := checkNestedTxn(ctx); err != nil {
- return 0, err
- }
-
- sh, err := c.idleSessions.take(ctx)
- if err != nil {
- return 0, ToSpannerError(err)
- }
- if sh != nil {
- defer sh.recycle()
- }
- // Mark isLongRunningTransaction to true, as the session in case of partitioned dml can be long-running
- sh.mu.Lock()
- sh.eligibleForLongRunning = true
- sh.mu.Unlock()
-
- // Create the parameters and the SQL request, but without a transaction.
- // The transaction reference will be added by the executePdml method.
- params, paramTypes, err := statement.convertParams()
- if err != nil {
- return 0, ToSpannerError(err)
- }
- req := &sppb.ExecuteSqlRequest{
- Session: sh.getID(),
- Sql: statement.SQL,
- Params: params,
- ParamTypes: paramTypes,
- QueryOptions: options.Options,
- RequestOptions: createRequestOptions(options.Priority, options.RequestTag, ""),
- }
-
- // Make a retryer for Aborted and certain Internal errors.
- retryer := onCodes(DefaultRetryBackoff, codes.Aborted, codes.Internal)
- // Execute the PDML and retry if the transaction is aborted.
- executePdmlWithRetry := func(ctx context.Context) (int64, error) {
- for {
- count, err := executePdml(contextWithOutgoingMetadata(ctx, sh.getMetadata(), c.disableRouteToLeader), sh, req, options)
- if err == nil {
- return count, nil
- }
- delay, shouldRetry := retryer.Retry(err)
- if !shouldRetry {
- return 0, err
- }
- if err := gax.Sleep(ctx, delay); err != nil {
- return 0, err
- }
- }
- }
- return executePdmlWithRetry(ctx)
-}
-
-// executePdml executes the following steps:
-// 1. Begin a PDML transaction
-// 2. Add the ID of the PDML transaction to the SQL request.
-// 3. Execute the update statement on the PDML transaction
-//
-// Note that PDML transactions cannot be committed or rolled back.
-func executePdml(ctx context.Context, sh *sessionHandle, req *sppb.ExecuteSqlRequest, options QueryOptions) (count int64, err error) {
- var md metadata.MD
- sh.updateLastUseTime()
- // Begin transaction.
- res, err := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{
- Session: sh.getID(),
- Options: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_PartitionedDml_{PartitionedDml: &sppb.TransactionOptions_PartitionedDml{}},
- ExcludeTxnFromChangeStreams: options.ExcludeTxnFromChangeStreams,
- },
- })
- if err != nil {
- return 0, ToSpannerError(err)
- }
- // Add a reference to the PDML transaction on the ExecuteSql request.
- req.Transaction = &sppb.TransactionSelector{
- Selector: &sppb.TransactionSelector_Id{Id: res.Id},
- }
-
- sh.updateLastUseTime()
- resultSet, err := sh.getClient().ExecuteSql(ctx, req, gax.WithGRPCOptions(grpc.Header(&md)))
- if getGFELatencyMetricsFlag() && md != nil && sh.session.pool != nil {
- err := captureGFELatencyStats(tag.NewContext(ctx, sh.session.pool.tagMap), md, "executePdml_ExecuteSql")
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "executePdml_ExecuteSql", sh.session.pool.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- if err != nil {
- return 0, err
- }
-
- if resultSet.Stats == nil {
- return 0, spannerErrorf(codes.InvalidArgument, "query passed to Update: %q", req.Sql)
- }
- return extractRowCount(resultSet.Stats)
-}
diff --git a/vendor/cloud.google.com/go/spanner/protoutils.go b/vendor/cloud.google.com/go/spanner/protoutils.go
deleted file mode 100644
index 988e71594..000000000
--- a/vendor/cloud.google.com/go/spanner/protoutils.go
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "encoding/base64"
- "math/big"
- "strconv"
- "time"
-
- "cloud.google.com/go/civil"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- proto3 "google.golang.org/protobuf/types/known/structpb"
-)
-
-// Helpers to generate protobuf values and Cloud Spanner types.
-
-func stringProto(s string) *proto3.Value {
- return &proto3.Value{Kind: stringKind(s)}
-}
-
-func stringKind(s string) *proto3.Value_StringValue {
- return &proto3.Value_StringValue{StringValue: s}
-}
-
-func stringType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_STRING}
-}
-
-func boolProto(b bool) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: b}}
-}
-
-func boolType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_BOOL}
-}
-
-func intProto(n int64) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(n, 10)}}
-}
-
-func intType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_INT64}
-}
-
-func float32Proto(n float32) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: float64(n)}}
-}
-
-func float32Type() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_FLOAT32}
-}
-
-func floatProto(n float64) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: n}}
-}
-
-func floatType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_FLOAT64}
-}
-
-func numericProto(n *big.Rat) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: NumericString(n)}}
-}
-
-func numericType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_NUMERIC}
-}
-
-func pgNumericType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_NUMERIC, TypeAnnotation: sppb.TypeAnnotationCode_PG_NUMERIC}
-}
-
-func pgOidType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_INT64, TypeAnnotation: sppb.TypeAnnotationCode_PG_OID}
-}
-
-func jsonType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_JSON}
-}
-
-func pgJsonbType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_JSON, TypeAnnotation: sppb.TypeAnnotationCode_PG_JSONB}
-}
-
-func bytesProto(b []byte) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}}
-}
-
-func bytesType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_BYTES}
-}
-
-func timeProto(t time.Time) *proto3.Value {
- return stringProto(t.UTC().Format(time.RFC3339Nano))
-}
-
-func timeType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_TIMESTAMP}
-}
-
-func dateProto(d civil.Date) *proto3.Value {
- return stringProto(d.String())
-}
-
-func dateType() *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_DATE}
-}
-
-func listProto(p ...*proto3.Value) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{Values: p}}}
-}
-
-func listValueProto(p ...*proto3.Value) *proto3.ListValue {
- return &proto3.ListValue{Values: p}
-}
-
-func listType(t *sppb.Type) *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_ARRAY, ArrayElementType: t}
-}
-
-func mkField(n string, t *sppb.Type) *sppb.StructType_Field {
- return &sppb.StructType_Field{Name: n, Type: t}
-}
-
-func structType(fields ...*sppb.StructType_Field) *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{Fields: fields}}
-}
-
-func nullProto() *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}}
-}
-
-func protoMessageType(fqn string) *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_PROTO, ProtoTypeFqn: fqn}
-}
-
-func protoEnumType(fqn string) *sppb.Type {
- return &sppb.Type{Code: sppb.TypeCode_ENUM, ProtoTypeFqn: fqn}
-}
-
-func protoMessageProto(m proto.Message) *proto3.Value {
- var b, _ = proto.Marshal(m)
- return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}}
-}
-
-func protoEnumProto(e protoreflect.Enum) *proto3.Value {
- return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(int64(e.Number()), 10)}}
-}
diff --git a/vendor/cloud.google.com/go/spanner/read.go b/vendor/cloud.google.com/go/spanner/read.go
deleted file mode 100644
index 83755722e..000000000
--- a/vendor/cloud.google.com/go/spanner/read.go
+++ /dev/null
@@ -1,826 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "bytes"
- "context"
- "io"
- "log"
- "sync/atomic"
- "time"
-
- "cloud.google.com/go/internal/protostruct"
- "cloud.google.com/go/internal/trace"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/iterator"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/proto"
- proto3 "google.golang.org/protobuf/types/known/structpb"
-)
-
-// streamingReceiver is the interface for receiving data from a client side
-// stream.
-type streamingReceiver interface {
- Recv() (*sppb.PartialResultSet, error)
-}
-
-// errEarlyReadEnd returns error for read finishes when gRPC stream is still
-// active.
-func errEarlyReadEnd() error {
- return spannerErrorf(codes.FailedPrecondition, "read completed with active stream")
-}
-
-// stream is the internal fault tolerant method for streaming data from Cloud
-// Spanner.
-func stream(
- ctx context.Context,
- logger *log.Logger,
- rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error),
- setTimestamp func(time.Time),
- release func(error),
-) *RowIterator {
- return streamWithReplaceSessionFunc(
- ctx,
- logger,
- rpc,
- nil,
- nil,
- setTimestamp,
- release,
- )
-}
-
-// this stream method will automatically retry the stream on a new session if
-// the replaceSessionFunc function has been defined. This function should only be
-// used for single-use transactions.
-func streamWithReplaceSessionFunc(
- ctx context.Context,
- logger *log.Logger,
- rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error),
- replaceSession func(ctx context.Context) error,
- setTransactionID func(transactionID),
- setTimestamp func(time.Time),
- release func(error),
-) *RowIterator {
- ctx, cancel := context.WithCancel(ctx)
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.RowIterator")
- return &RowIterator{
- streamd: newResumableStreamDecoder(ctx, logger, rpc, replaceSession),
- rowd: &partialResultSetDecoder{},
- setTransactionID: setTransactionID,
- setTimestamp: setTimestamp,
- release: release,
- cancel: cancel,
- }
-}
-
-// rowIterator is an interface for iterating over Rows.
-type rowIterator interface {
- Next() (*Row, error)
- Do(f func(r *Row) error) error
- Stop()
-}
-
-// RowIterator is an iterator over Rows.
-type RowIterator struct {
- // The plan for the query. Available after RowIterator.Next returns
- // iterator.Done if QueryWithStats was called.
- QueryPlan *sppb.QueryPlan
-
- // Execution statistics for the query. Available after RowIterator.Next
- // returns iterator.Done if QueryWithStats was called.
- QueryStats map[string]interface{}
-
- // For a DML statement, the number of rows affected. For PDML, this is a
- // lower bound. Available for DML statements after RowIterator.Next returns
- // iterator.Done.
- RowCount int64
-
- // The metadata of the results of the query. The metadata are available
- // after the first call to RowIterator.Next(), unless the first call to
- // RowIterator.Next() returned an error that is not equal to iterator.Done.
- Metadata *sppb.ResultSetMetadata
-
- streamd *resumableStreamDecoder
- rowd *partialResultSetDecoder
- setTransactionID func(transactionID)
- setTimestamp func(time.Time)
- release func(error)
- cancel func()
- err error
- rows []*Row
- sawStats bool
-}
-
-// this is for safety from future changes to RowIterator making sure that it implements rowIterator interface.
-var _ rowIterator = (*RowIterator)(nil)
-
-// Next returns the next result. Its second return value is iterator.Done if
-// there are no more results. Once Next returns Done, all subsequent calls
-// will return Done.
-func (r *RowIterator) Next() (*Row, error) {
- if r.err != nil {
- return nil, r.err
- }
- for len(r.rows) == 0 && r.streamd.next() {
- prs := r.streamd.get()
- if r.setTransactionID != nil {
- // this is when Read/Query is executed using ReadWriteTransaction
- // and server returned the first stream response.
- if prs.Metadata != nil && prs.Metadata.Transaction != nil {
- r.setTransactionID(prs.Metadata.Transaction.GetId())
- } else {
- // This code block should never run ideally, server is expected to return a transactionID in response
- // if request contains TransactionSelector::Begin option, this is here as fallback to retry with
- // explicit transactionID after a retry.
- r.setTransactionID(nil)
- r.err = errInlineBeginTransactionFailed()
- return nil, r.err
- }
- r.setTransactionID = nil
- }
- if prs.Stats != nil {
- r.sawStats = true
- r.QueryPlan = prs.Stats.QueryPlan
- r.QueryStats = protostruct.DecodeToMap(prs.Stats.QueryStats)
- if prs.Stats.RowCount != nil {
- rc, err := extractRowCount(prs.Stats)
- if err != nil {
- return nil, err
- }
- r.RowCount = rc
- }
- }
- var metadata *sppb.ResultSetMetadata
- r.rows, metadata, r.err = r.rowd.add(prs)
- if metadata != nil {
- r.Metadata = metadata
- }
- if r.err != nil {
- return nil, r.err
- }
- if !r.rowd.ts.IsZero() && r.setTimestamp != nil {
- r.setTimestamp(r.rowd.ts)
- r.setTimestamp = nil
- }
- }
- if len(r.rows) > 0 {
- row := r.rows[0]
- r.rows = r.rows[1:]
- return row, nil
- }
- if err := r.streamd.lastErr(); err != nil {
- r.err = ToSpannerError(err)
- } else if !r.rowd.done() {
- r.err = errEarlyReadEnd()
- } else {
- r.err = iterator.Done
- }
- return nil, r.err
-}
-
-func extractRowCount(stats *sppb.ResultSetStats) (int64, error) {
- if stats.RowCount == nil {
- return 0, spannerErrorf(codes.Internal, "missing RowCount")
- }
- switch rc := stats.RowCount.(type) {
- case *sppb.ResultSetStats_RowCountExact:
- return rc.RowCountExact, nil
- case *sppb.ResultSetStats_RowCountLowerBound:
- return rc.RowCountLowerBound, nil
- default:
- return 0, spannerErrorf(codes.Internal, "unknown RowCount type %T", stats.RowCount)
- }
-}
-
-// Do calls the provided function once in sequence for each row in the
-// iteration. If the function returns a non-nil error, Do immediately returns
-// that error.
-//
-// If there are no rows in the iterator, Do will return nil without calling the
-// provided function.
-//
-// Do always calls Stop on the iterator.
-func (r *RowIterator) Do(f func(r *Row) error) error {
- defer r.Stop()
- for {
- row, err := r.Next()
- switch err {
- case iterator.Done:
- return nil
- case nil:
- if err = f(row); err != nil {
- return err
- }
- default:
- return err
- }
- }
-}
-
-// Stop terminates the iteration. It should be called after you finish using the
-// iterator.
-func (r *RowIterator) Stop() {
- if r.streamd != nil {
- if r.err != nil && r.err != iterator.Done {
- defer trace.EndSpan(r.streamd.ctx, r.err)
- } else {
- defer trace.EndSpan(r.streamd.ctx, nil)
- }
- }
- if r.cancel != nil {
- r.cancel()
- }
- if r.release != nil {
- r.release(r.err)
- if r.err == nil {
- r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop")
- }
- r.release = nil
- }
-}
-
-// partialResultQueue implements a simple FIFO queue. The zero value is a valid
-// queue.
-type partialResultQueue struct {
- q []*sppb.PartialResultSet
- first int
- last int
- n int // number of elements in queue
-}
-
-// empty returns if the partialResultQueue is empty.
-func (q *partialResultQueue) empty() bool {
- return q.n == 0
-}
-
-// errEmptyQueue returns error for dequeuing an empty queue.
-func errEmptyQueue() error {
- return spannerErrorf(codes.OutOfRange, "empty partialResultQueue")
-}
-
-// peekLast returns the last item in partialResultQueue; if the queue
-// is empty, it returns error.
-func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) {
- if q.empty() {
- return nil, errEmptyQueue()
- }
- return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil
-}
-
-// push adds an item to the tail of partialResultQueue.
-func (q *partialResultQueue) push(r *sppb.PartialResultSet) {
- if q.q == nil {
- q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */)
- }
- if q.n == cap(q.q) {
- buf := make([]*sppb.PartialResultSet, cap(q.q)*2)
- for i := 0; i < q.n; i++ {
- buf[i] = q.q[(q.first+i)%cap(q.q)]
- }
- q.q = buf
- q.first = 0
- q.last = q.n
- }
- q.q[q.last] = r
- q.last = (q.last + 1) % cap(q.q)
- q.n++
-}
-
-// pop removes an item from the head of partialResultQueue and returns it.
-func (q *partialResultQueue) pop() *sppb.PartialResultSet {
- if q.n == 0 {
- return nil
- }
- r := q.q[q.first]
- q.q[q.first] = nil
- q.first = (q.first + 1) % cap(q.q)
- q.n--
- return r
-}
-
-// clear empties partialResultQueue.
-func (q *partialResultQueue) clear() {
- *q = partialResultQueue{}
-}
-
-// dump retrieves all items from partialResultQueue and return them in a slice.
-// It is used only in tests.
-func (q *partialResultQueue) dump() []*sppb.PartialResultSet {
- var dq []*sppb.PartialResultSet
- for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) {
- dq = append(dq, q.q[i])
- }
- return dq
-}
-
-// resumableStreamDecoderState encodes resumableStreamDecoder's status. See also
-// the comments for resumableStreamDecoder.Next.
-type resumableStreamDecoderState int
-
-const (
- unConnected resumableStreamDecoderState = iota // 0
- queueingRetryable // 1
- queueingUnretryable // 2
- aborted // 3
- finished // 4
-)
-
-// resumableStreamDecoder provides a resumable interface for receiving
-// sppb.PartialResultSet(s) from a given query wrapped by
-// resumableStreamDecoder.rpc().
-type resumableStreamDecoder struct {
- // state is the current status of resumableStreamDecoder, see also
- // the comments for resumableStreamDecoder.Next.
- state resumableStreamDecoderState
-
- // stateWitness when non-nil is called to observe state change,
- // used for testing.
- stateWitness func(resumableStreamDecoderState)
-
- // ctx is the caller's context, used for cancel/timeout Next().
- ctx context.Context
-
- // rpc is a factory of streamingReceiver, which might resume
- // a previous stream from the point encoded in restartToken.
- // rpc is always a wrapper of a Cloud Spanner query which is
- // resumable.
- rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error)
-
- // replaceSessionFunc is a function that can be used to replace the session
- // that is being used to execute the read operation. This function should
- // only be defined for single-use transactions that can safely retry the
- // read operation on a new session. If this function is nil, the stream
- // does not support retrying the query on a new session.
- replaceSessionFunc func(ctx context.Context) error
-
- // logger is the logger to use.
- logger *log.Logger
-
- // stream is the current RPC streaming receiver.
- stream streamingReceiver
-
- // q buffers received yet undecoded partial results.
- q partialResultQueue
-
- // bytesBetweenResumeTokens is the proxy of the byte size of
- // PartialResultSets being queued between two resume tokens. Once
- // bytesBetweenResumeTokens is greater than maxBytesBetweenResumeTokens,
- // resumableStreamDecoder goes into queueingUnretryable state.
- bytesBetweenResumeTokens int32
-
- // maxBytesBetweenResumeTokens is the max number of bytes that can be
- // buffered between two resume tokens. It is always copied from the global
- // maxBytesBetweenResumeTokens atomically.
- maxBytesBetweenResumeTokens int32
-
- // np is the next sppb.PartialResultSet ready to be returned
- // to caller of resumableStreamDecoder.Get().
- np *sppb.PartialResultSet
-
- // resumeToken stores the resume token that resumableStreamDecoder has
- // last revealed to caller.
- resumeToken []byte
-
- // err is the last error resumableStreamDecoder has encountered so far.
- err error
-
- // backoff is used for the retry settings
- backoff gax.Backoff
-}
-
-// newResumableStreamDecoder creates a new resumeableStreamDecoder instance.
-// Parameter rpc should be a function that creates a new stream beginning at the
-// restartToken if non-nil.
-func newResumableStreamDecoder(ctx context.Context, logger *log.Logger, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error), replaceSession func(ctx context.Context) error) *resumableStreamDecoder {
- return &resumableStreamDecoder{
- ctx: ctx,
- logger: logger,
- rpc: rpc,
- replaceSessionFunc: replaceSession,
- maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens),
- backoff: DefaultRetryBackoff,
- }
-}
-
-// changeState fulfills state transition for resumableStateDecoder.
-func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) {
- if d.state == queueingRetryable && d.state != target {
- // Reset bytesBetweenResumeTokens because it is only meaningful/changed
- // under queueingRetryable state.
- d.bytesBetweenResumeTokens = 0
- }
- d.state = target
- if d.stateWitness != nil {
- d.stateWitness(target)
- }
-}
-
-// isNewResumeToken returns if the observed resume token is different from
-// the one returned from server last time.
-func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool {
- if rt == nil {
- return false
- }
- if bytes.Equal(rt, d.resumeToken) {
- return false
- }
- return true
-}
-
-// Next advances to the next available partial result set. If error or no
-// more, returns false, call Err to determine if an error was encountered.
-// The following diagram illustrates the state machine of resumableStreamDecoder
-// that Next() implements. Note that state transition can be only triggered by
-// RPC activities.
-/*
- rpc() fails retryable
- +---------+
- | | rpc() fails unretryable/ctx timeouts or cancelled
- | | +------------------------------------------------+
- | | | |
- | v | v
- | +---+---+---+ +--------+ +------+--+
- +-----+unConnected| |finished| | aborted |<----+
- | | ++-----+-+ +------+--+ |
- +---+----+--+ ^ ^ ^ |
- | ^ | | | |
- | | | | recv() fails |
- | | | | | |
- | |recv() fails retryable | | | |
- | |with valid ctx | | | |
- | | | | | |
- rpc() succeeds | +-----------------------+ | | |
- | | | recv EOF recv EOF | |
- | | | | | |
- v | | Queue size exceeds | | |
- +---+----+---+----+threshold +-------+-----------+ | |
-+---------->+ +--------------->+ +-+ |
-| |queueingRetryable| |queueingUnretryable| |
-| | +<---------------+ | |
-| +---+----------+--+ pop() returns +--+----+-----------+ |
-| | | resume token | ^ |
-| | | | | |
-| | | | | |
-+---------------+ | | | |
- recv() succeeds | +----+ |
- | recv() succeeds |
- | |
- | |
- | |
- | |
- | |
- +--------------------------------------------------+
- recv() fails unretryable
-
-*/
-var (
- // maxBytesBetweenResumeTokens is the maximum amount of bytes that
- // resumableStreamDecoder in queueingRetryable state can use to queue
- // PartialResultSets before getting into queueingUnretryable state.
- maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024)
-)
-
-func (d *resumableStreamDecoder) next() bool {
- retryer := onCodes(d.backoff, codes.Unavailable, codes.ResourceExhausted, codes.Internal)
- for {
- switch d.state {
- case unConnected:
- // If no gRPC stream is available, try to initiate one.
- d.stream, d.err = d.rpc(d.ctx, d.resumeToken)
- if d.err == nil {
- d.changeState(queueingRetryable)
- continue
- }
- delay, shouldRetry := retryer.Retry(d.err)
- if !shouldRetry {
- d.changeState(aborted)
- continue
- }
- trace.TracePrintf(d.ctx, nil, "Backing off stream read for %s", delay)
- if err := gax.Sleep(d.ctx, delay); err == nil {
- // Be explicit about state transition, although the
- // state doesn't actually change. State transition
- // will be triggered only by RPC activity, regardless of
- // whether there is an actual state change or not.
- d.changeState(unConnected)
- } else {
- d.err = err
- d.changeState(aborted)
- }
- continue
-
- case queueingRetryable:
- fallthrough
- case queueingUnretryable:
- // Receiving queue is not empty.
- last, err := d.q.peekLast()
- if err != nil {
- // Only the case that receiving queue is empty could cause
- // peekLast to return error and in such case, we should try to
- // receive from stream.
- d.tryRecv(retryer)
- continue
- }
- if d.isNewResumeToken(last.ResumeToken) {
- // Got new resume token, return buffered sppb.PartialResultSets
- // to caller.
- d.np = d.q.pop()
- if d.q.empty() {
- d.bytesBetweenResumeTokens = 0
- // The new resume token was just popped out from queue,
- // record it.
- d.resumeToken = d.np.ResumeToken
- d.changeState(queueingRetryable)
- }
- return true
- }
- if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable {
- d.changeState(queueingUnretryable)
- continue
- }
- if d.state == queueingUnretryable {
- // When there is no resume token observed, only yield
- // sppb.PartialResultSets to caller under queueingUnretryable
- // state.
- d.np = d.q.pop()
- return true
- }
- // Needs to receive more from gRPC stream till a new resume token
- // is observed.
- d.tryRecv(retryer)
- continue
- case aborted:
- // Discard all pending items because none of them should be yield
- // to caller.
- d.q.clear()
- return false
- case finished:
- // If query has finished, check if there are still buffered messages.
- if d.q.empty() {
- // No buffered PartialResultSet.
- return false
- }
- // Although query has finished, there are still buffered
- // PartialResultSets.
- d.np = d.q.pop()
- return true
-
- default:
- logf(d.logger, "Unexpected resumableStreamDecoder.state: %v", d.state)
- return false
- }
- }
-}
-
-// tryRecv attempts to receive a PartialResultSet from gRPC stream.
-func (d *resumableStreamDecoder) tryRecv(retryer gax.Retryer) {
- var res *sppb.PartialResultSet
- res, d.err = d.stream.Recv()
- if d.err == nil {
- d.q.push(res)
- if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) {
- d.bytesBetweenResumeTokens += int32(proto.Size(res))
- }
- d.changeState(d.state)
- return
- }
- if d.err == io.EOF {
- d.err = nil
- d.changeState(finished)
- return
- }
- if d.replaceSessionFunc != nil && isSessionNotFoundError(d.err) && d.resumeToken == nil {
- // A 'Session not found' error occurred before we received a resume
- // token and a replaceSessionFunc function is defined. Try to restart
- // the stream on a new session.
- if err := d.replaceSessionFunc(d.ctx); err != nil {
- d.err = err
- d.changeState(aborted)
- return
- }
- } else {
- delay, shouldRetry := retryer.Retry(d.err)
- if !shouldRetry || d.state != queueingRetryable {
- d.changeState(aborted)
- return
- }
- if err := gax.Sleep(d.ctx, delay); err != nil {
- d.err = err
- d.changeState(aborted)
- return
- }
- }
- // Clear error and retry the stream.
- d.err = nil
- // Discard all queue items (none have resume tokens).
- d.q.clear()
- d.stream = nil
- d.changeState(unConnected)
-}
-
-// get returns the most recent PartialResultSet generated by a call to next.
-func (d *resumableStreamDecoder) get() *sppb.PartialResultSet {
- return d.np
-}
-
-// lastErr returns the last non-EOF error encountered.
-func (d *resumableStreamDecoder) lastErr() error {
- return d.err
-}
-
-// partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner
-// Rows.
-type partialResultSetDecoder struct {
- row Row
- tx *sppb.Transaction
- chunked bool // if true, next value should be merged with last values
- // entry.
- ts time.Time // read timestamp
-}
-
-// yield checks we have a complete row, and if so returns it. A row is not
-// complete if it doesn't have enough columns, or if this is a chunked response
-// and there are no further values to process.
-func (p *partialResultSetDecoder) yield(chunked, last bool) *Row {
- if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) {
- // When partialResultSetDecoder gets enough number of Column values.
- // There are two cases that a new Row should be yield:
- //
- // 1. The incoming PartialResultSet is not chunked;
- // 2. The incoming PartialResultSet is chunked, but the
- // proto3.Value being merged is not the last one in
- // the PartialResultSet.
- //
- // Use a fresh Row to simplify clients that want to use yielded results
- // after the next row is retrieved. Note that fields is never changed
- // so it doesn't need to be copied.
- fresh := Row{
- fields: p.row.fields,
- vals: make([]*proto3.Value, len(p.row.vals)),
- }
- copy(fresh.vals, p.row.vals)
- p.row.vals = p.row.vals[:0] // empty and reuse slice
- return &fresh
- }
- return nil
-}
-
-// yieldTx returns transaction information via caller supplied callback.
-func errChunkedEmptyRow() error {
- return spannerErrorf(codes.FailedPrecondition, "got invalid chunked PartialResultSet with empty Row")
-}
-
-// add tries to merge a new PartialResultSet into buffered Row. It returns any
-// rows that have been completed as a result.
-func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, *sppb.ResultSetMetadata, error) {
- var rows []*Row
- if r.Metadata != nil {
- // Metadata should only be returned in the first result.
- if p.row.fields == nil {
- p.row.fields = r.Metadata.RowType.Fields
- }
- if p.tx == nil && r.Metadata.Transaction != nil {
- p.tx = r.Metadata.Transaction
- if p.tx.ReadTimestamp != nil {
- p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos))
- }
- }
- }
- if len(r.Values) == 0 {
- return nil, r.Metadata, nil
- }
- if p.chunked {
- p.chunked = false
- // Try to merge first value in r.Values into uncompleted row.
- last := len(p.row.vals) - 1
- if last < 0 { // confidence check
- return nil, nil, errChunkedEmptyRow()
- }
- var err error
- // If p is chunked, then we should always try to merge p.last with
- // r.first.
- if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil {
- return nil, r.Metadata, err
- }
- r.Values = r.Values[1:]
- // Merge is done, try to yield a complete Row.
- if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil {
- rows = append(rows, row)
- }
- }
- for i, v := range r.Values {
- // The rest values in r can be appened into p directly.
- p.row.vals = append(p.row.vals, v)
- // Again, check to see if a complete Row can be yielded because of the
- // newly added value.
- if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil {
- rows = append(rows, row)
- }
- }
- if r.ChunkedValue {
- // After dealing with all values in r, if r is chunked then p must be
- // also chunked.
- p.chunked = true
- }
- return rows, r.Metadata, nil
-}
-
-// isMergeable returns if a protobuf Value can be potentially merged with other
-// protobuf Values.
-func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool {
- switch a.Kind.(type) {
- case *proto3.Value_StringValue:
- return true
- case *proto3.Value_ListValue:
- return true
- default:
- return false
- }
-}
-
-// errIncompatibleMergeTypes returns error for incompatible protobuf types that
-// cannot be merged by partialResultSetDecoder.
-func errIncompatibleMergeTypes(a, b *proto3.Value) error {
- return spannerErrorf(codes.FailedPrecondition, "incompatible type in chunked PartialResultSet. expected (%T), got (%T)", a.Kind, b.Kind)
-}
-
-// errUnsupportedMergeType returns error for protobuf type that cannot be merged
-// to other protobufs.
-func errUnsupportedMergeType(a *proto3.Value) error {
- return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind)
-}
-
-// merge tries to combine two protobuf Values if possible.
-func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) {
- var err error
- typeErr := errIncompatibleMergeTypes(a, b)
- switch t := a.Kind.(type) {
- case *proto3.Value_StringValue:
- s, ok := b.Kind.(*proto3.Value_StringValue)
- if !ok {
- return nil, typeErr
- }
- return &proto3.Value{
- Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue},
- }, nil
- case *proto3.Value_ListValue:
- l, ok := b.Kind.(*proto3.Value_ListValue)
- if !ok {
- return nil, typeErr
- }
- if l.ListValue == nil || len(l.ListValue.Values) <= 0 {
- // b is an empty list, just return a.
- return a, nil
- }
- if t.ListValue == nil || len(t.ListValue.Values) <= 0 {
- // a is an empty list, just return b.
- return b, nil
- }
- if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) {
- // When the last item in a is of type String, List or Struct
- // (encoded into List by Cloud Spanner), try to Merge last item in
- // a and first item in b.
- t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0])
- if err != nil {
- return nil, err
- }
- l.ListValue.Values = l.ListValue.Values[1:]
- }
- return &proto3.Value{
- Kind: &proto3.Value_ListValue{
- ListValue: &proto3.ListValue{
- Values: append(t.ListValue.Values, l.ListValue.Values...),
- },
- },
- }, nil
- default:
- return nil, errUnsupportedMergeType(a)
- }
-
-}
-
-// Done returns if partialResultSetDecoder has already done with all buffered
-// values.
-func (p *partialResultSetDecoder) done() bool {
- // There is no explicit end of stream marker, but ending part way through a
- // row is obviously bad, or ending with the last column still awaiting
- // completion.
- return len(p.row.vals) == 0 && !p.chunked
-}
diff --git a/vendor/cloud.google.com/go/spanner/retry.go b/vendor/cloud.google.com/go/spanner/retry.go
deleted file mode 100644
index a140ce62e..000000000
--- a/vendor/cloud.google.com/go/spanner/retry.go
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "context"
- "strings"
- "time"
-
- "cloud.google.com/go/internal/trace"
- "github.com/googleapis/gax-go/v2"
- "google.golang.org/genproto/googleapis/rpc/errdetails"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-const (
- retryInfoKey = "google.rpc.retryinfo-bin"
-)
-
-// DefaultRetryBackoff is used for retryers as a fallback value when the server
-// did not return any retry information.
-var DefaultRetryBackoff = gax.Backoff{
- Initial: 20 * time.Millisecond,
- Max: 32 * time.Second,
- Multiplier: 1.3,
-}
-
-// spannerRetryer extends the generic gax Retryer, but also checks for any
-// retry info returned by Cloud Spanner and uses that if present.
-type spannerRetryer struct {
- gax.Retryer
-}
-
-// onCodes returns a spannerRetryer that will retry on the specified error
-// codes. For Internal errors, only errors that have one of a list of known
-// descriptions should be retried.
-func onCodes(bo gax.Backoff, cc ...codes.Code) gax.Retryer {
- return &spannerRetryer{
- Retryer: gax.OnCodes(cc, bo),
- }
-}
-
-// Retry returns the retry delay returned by Cloud Spanner if that is present.
-// Otherwise it returns the retry delay calculated by the generic gax Retryer.
-func (r *spannerRetryer) Retry(err error) (time.Duration, bool) {
- if status.Code(err) == codes.Internal &&
- !strings.Contains(err.Error(), "stream terminated by RST_STREAM") &&
- // See b/25451313.
- !strings.Contains(err.Error(), "HTTP/2 error code: INTERNAL_ERROR") &&
- // See b/27794742.
- !strings.Contains(err.Error(), "Connection closed with unknown cause") &&
- !strings.Contains(err.Error(), "Received unexpected EOS on DATA frame from server") {
- return 0, false
- }
-
- delay, shouldRetry := r.Retryer.Retry(err)
- if !shouldRetry {
- return 0, false
- }
- if serverDelay, hasServerDelay := ExtractRetryDelay(err); hasServerDelay {
- delay = serverDelay
- }
- return delay, true
-}
-
-// runWithRetryOnAbortedOrFailedInlineBeginOrSessionNotFound executes the given function and
-// retries it if it returns an Aborted, Session not found error or certain Internal errors. The retry
-// is delayed if the error was Aborted or Internal error. The delay between retries is the delay
-// returned by Cloud Spanner, or if none is returned, the calculated delay with
-// a minimum of 10ms and maximum of 32s. There is no delay before the retry if
-// the error was Session not found or failed inline begin transaction.
-func runWithRetryOnAbortedOrFailedInlineBeginOrSessionNotFound(ctx context.Context, f func(context.Context) error) error {
- retryer := onCodes(DefaultRetryBackoff, codes.Aborted, codes.ResourceExhausted, codes.Internal)
- funcWithRetry := func(ctx context.Context) error {
- for {
- err := f(ctx)
- if err == nil {
- return nil
- }
- // Get Spanner or GRPC status error.
- // TODO(loite): Refactor to unwrap Status error instead of Spanner
- // error when statusError implements the (errors|xerrors).Wrapper
- // interface.
- var retryErr error
- var se *Error
- if errorAs(err, &se) {
- // It is a (wrapped) Spanner error. Use that to check whether
- // we should retry.
- retryErr = se
- } else {
- // It's not a Spanner error, check if it is a status error.
- _, ok := status.FromError(err)
- if !ok {
- return err
- }
- retryErr = err
- }
- if isSessionNotFoundError(retryErr) {
- trace.TracePrintf(ctx, nil, "Retrying after Session not found")
- continue
- }
- if isFailedInlineBeginTransaction(retryErr) {
- trace.TracePrintf(ctx, nil, "Retrying after failed inline begin transaction")
- continue
- }
- delay, shouldRetry := retryer.Retry(retryErr)
- if !shouldRetry {
- return err
- }
- trace.TracePrintf(ctx, nil, "Backing off after ABORTED for %s, then retrying", delay)
- if err := gax.Sleep(ctx, delay); err != nil {
- return err
- }
- }
- }
- return funcWithRetry(ctx)
-}
-
-// ExtractRetryDelay extracts retry backoff from a *spanner.Error if present.
-func ExtractRetryDelay(err error) (time.Duration, bool) {
- var se *Error
- var s *status.Status
- // Unwrap status error.
- if errorAs(err, &se) {
- s = status.Convert(se.Unwrap())
- } else {
- s = status.Convert(err)
- }
- if s == nil {
- return 0, false
- }
- for _, detail := range s.Details() {
- if retryInfo, ok := detail.(*errdetails.RetryInfo); ok {
- if !retryInfo.GetRetryDelay().IsValid() {
- return 0, false
- }
- return retryInfo.GetRetryDelay().AsDuration(), true
- }
- }
- return 0, false
-}
diff --git a/vendor/cloud.google.com/go/spanner/row.go b/vendor/cloud.google.com/go/spanner/row.go
deleted file mode 100644
index f61350435..000000000
--- a/vendor/cloud.google.com/go/spanner/row.go
+++ /dev/null
@@ -1,575 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "fmt"
- "reflect"
- "strings"
-
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/grpc/codes"
- proto3 "google.golang.org/protobuf/types/known/structpb"
-)
-
-// A Row is a view of a row of data returned by a Cloud Spanner read.
-// It consists of a number of columns; the number depends on the columns
-// used to construct the read.
-//
-// The column values can be accessed by index. For instance, if the read specified
-// []string{"photo_id", "caption"}, then each row will contain two
-// columns: "photo_id" with index 0, and "caption" with index 1.
-//
-// Column values are decoded by using one of the Column, ColumnByName, or
-// Columns methods. The valid values passed to these methods depend on the
-// column type. For example:
-//
-// var photoID int64
-// err := row.Column(0, &photoID) // Decode column 0 as an integer.
-//
-// var caption string
-// err := row.Column(1, &caption) // Decode column 1 as a string.
-//
-// // Decode all the columns.
-// err := row.Columns(&photoID, &caption)
-//
-// Supported types and their corresponding Cloud Spanner column type(s) are:
-//
-// *string(not NULL), *NullString - STRING
-// *[]string, *[]NullString - STRING ARRAY
-// *[]byte - BYTES
-// *[][]byte - BYTES ARRAY
-// *int64(not NULL), *NullInt64 - INT64
-// *[]int64, *[]NullInt64 - INT64 ARRAY
-// *bool(not NULL), *NullBool - BOOL
-// *[]bool, *[]NullBool - BOOL ARRAY
-// *float32(not NULL), *NullFloat32 - FLOAT32
-// *[]float32, *[]NullFloat32 - FLOAT32 ARRAY
-// *float64(not NULL), *NullFloat64 - FLOAT64
-// *[]float64, *[]NullFloat64 - FLOAT64 ARRAY
-// *big.Rat(not NULL), *NullNumeric - NUMERIC
-// *[]big.Rat, *[]NullNumeric - NUMERIC ARRAY
-// *time.Time(not NULL), *NullTime - TIMESTAMP
-// *[]time.Time, *[]NullTime - TIMESTAMP ARRAY
-// *Date(not NULL), *NullDate - DATE
-// *[]civil.Date, *[]NullDate - DATE ARRAY
-// *[]*some_go_struct, *[]NullRow - STRUCT ARRAY
-// *NullJSON - JSON
-// *[]NullJSON - JSON ARRAY
-// *GenericColumnValue - any Cloud Spanner type
-//
-// For TIMESTAMP columns, the returned time.Time object will be in UTC.
-//
-// To fetch an array of BYTES, pass a *[][]byte. To fetch an array of (sub)rows, pass
-// a *[]spanner.NullRow or a *[]*some_go_struct where some_go_struct holds all
-// information of the subrow, see spanner.Row.ToStruct for the mapping between a
-// Cloud Spanner row and a Go struct. To fetch an array of other types, pass a
-// *[]spanner.NullXXX type of the appropriate type. Use GenericColumnValue when you
-// don't know in advance what column type to expect.
-//
-// Row decodes the row contents lazily; as a result, each call to a getter has
-// a chance of returning an error.
-//
-// A column value may be NULL if the corresponding value is not present in
-// Cloud Spanner. The spanner.NullXXX types (spanner.NullInt64 et al.) allow fetching
-// values that may be null. A NULL BYTES can be fetched into a *[]byte as nil.
-// It is an error to fetch a NULL value into any other type.
-type Row struct {
- fields []*sppb.StructType_Field
- vals []*proto3.Value // keep decoded for now
-}
-
-// String implements fmt.stringer.
-func (r *Row) String() string {
- return fmt.Sprintf("{fields: %s, values: %s}", r.fields, r.vals)
-}
-
-// errNamesValuesMismatch returns error for when columnNames count is not equal
-// to columnValues count.
-func errNamesValuesMismatch(columnNames []string, columnValues []interface{}) error {
- return spannerErrorf(codes.FailedPrecondition,
- "different number of names(%v) and values(%v)", len(columnNames), len(columnValues))
-}
-
-// NewRow returns a Row containing the supplied data. This can be useful for
-// mocking Cloud Spanner Read and Query responses for unit testing.
-func NewRow(columnNames []string, columnValues []interface{}) (*Row, error) {
- if len(columnValues) != len(columnNames) {
- return nil, errNamesValuesMismatch(columnNames, columnValues)
- }
- r := Row{
- fields: make([]*sppb.StructType_Field, len(columnValues)),
- vals: make([]*proto3.Value, len(columnValues)),
- }
- for i := range columnValues {
- val, typ, err := encodeValue(columnValues[i])
- if err != nil {
- return nil, err
- }
- r.fields[i] = &sppb.StructType_Field{
- Name: columnNames[i],
- Type: typ,
- }
- r.vals[i] = val
- }
- return &r, nil
-}
-
-// Size is the number of columns in the row.
-func (r *Row) Size() int {
- return len(r.fields)
-}
-
-// ColumnName returns the name of column i, or empty string for invalid column.
-func (r *Row) ColumnName(i int) string {
- if i < 0 || i >= len(r.fields) {
- return ""
- }
- return r.fields[i].Name
-}
-
-// ColumnIndex returns the index of the column with the given name. The
-// comparison is case-sensitive.
-func (r *Row) ColumnIndex(name string) (int, error) {
- found := false
- var index int
- if len(r.vals) != len(r.fields) {
- return 0, errFieldsMismatchVals(r)
- }
- for i, f := range r.fields {
- if f == nil {
- return 0, errNilColType(i)
- }
- if name == f.Name {
- if found {
- return 0, errDupColName(name)
- }
- found = true
- index = i
- }
- }
- if !found {
- return 0, errColNotFound(name)
- }
- return index, nil
-}
-
-// ColumnNames returns all column names of the row.
-func (r *Row) ColumnNames() []string {
- var n []string
- for _, c := range r.fields {
- n = append(n, c.Name)
- }
- return n
-}
-
-// ColumnType returns the Cloud Spanner Type of column i, or nil for invalid column.
-func (r *Row) ColumnType(i int) *sppb.Type {
- if i < 0 || i >= len(r.fields) {
- return nil
- }
- return r.fields[i].Type
-}
-
-// ColumnValue returns the Cloud Spanner Value of column i, or nil for invalid column.
-func (r *Row) ColumnValue(i int) *proto3.Value {
- if i < 0 || i >= len(r.vals) {
- return nil
- }
- return r.vals[i]
-}
-
-// errColIdxOutOfRange returns error for requested column index is out of the
-// range of the target Row's columns.
-func errColIdxOutOfRange(i int, r *Row) error {
- return spannerErrorf(codes.OutOfRange, "column index %d out of range [0,%d)", i, len(r.vals))
-}
-
-// errDecodeColumn returns error for not being able to decode a indexed column.
-func errDecodeColumn(i int, err error) error {
- if err == nil {
- return nil
- }
- var se *Error
- if !errorAs(err, &se) {
- return spannerErrorf(codes.InvalidArgument, "failed to decode column %v, error = <%v>", i, err)
- }
- se.decorate(fmt.Sprintf("failed to decode column %v", i))
- return se
-}
-
-// errFieldsMismatchVals returns error for field count isn't equal to value count in a Row.
-func errFieldsMismatchVals(r *Row) error {
- return spannerErrorf(codes.FailedPrecondition, "row has different number of fields(%v) and values(%v)",
- len(r.fields), len(r.vals))
-}
-
-// errNilColType returns error for column type for column i being nil in the row.
-func errNilColType(i int) error {
- return spannerErrorf(codes.FailedPrecondition, "column(%v)'s type is nil", i)
-}
-
-// Column fetches the value from the ith column, decoding it into ptr.
-// See the Row documentation for the list of acceptable argument types.
-// see Client.ReadWriteTransaction for an example.
-func (r *Row) Column(i int, ptr interface{}) error {
- if len(r.vals) != len(r.fields) {
- return errFieldsMismatchVals(r)
- }
- if i < 0 || i >= len(r.fields) {
- return errColIdxOutOfRange(i, r)
- }
- if r.fields[i] == nil {
- return errNilColType(i)
- }
- if err := decodeValue(r.vals[i], r.fields[i].Type, ptr); err != nil {
- return errDecodeColumn(i, err)
- }
- return nil
-}
-
-// errDupColName returns error for duplicated column name in the same row.
-func errDupColName(n string) error {
- return spannerErrorf(codes.FailedPrecondition, "ambiguous column name %q", n)
-}
-
-// errColNotFound returns error for not being able to find a named column.
-func errColNotFound(n string) error {
- return spannerErrorf(codes.NotFound, "column %q not found", n)
-}
-
-func errNotASlicePointer() error {
- return spannerErrorf(codes.InvalidArgument, "destination must be a pointer to a slice")
-}
-
-func errNilSlicePointer() error {
- return spannerErrorf(codes.InvalidArgument, "destination must be a non nil pointer")
-}
-
-func errTooManyColumns() error {
- return spannerErrorf(codes.InvalidArgument, "too many columns returned for primitive slice")
-}
-
-// ColumnByName fetches the value from the named column, decoding it into ptr.
-// See the Row documentation for the list of acceptable argument types.
-func (r *Row) ColumnByName(name string, ptr interface{}) error {
- index, err := r.ColumnIndex(name)
- if err != nil {
- return err
- }
- return r.Column(index, ptr)
-}
-
-// errNumOfColValue returns error for providing wrong number of values to Columns.
-func errNumOfColValue(n int, r *Row) error {
- return spannerErrorf(codes.InvalidArgument,
- "Columns(): number of arguments (%d) does not match row size (%d)", n, len(r.vals))
-}
-
-// Columns fetches all the columns in the row at once.
-//
-// The value of the kth column will be decoded into the kth argument to Columns. See
-// Row for the list of acceptable argument types. The number of arguments must be
-// equal to the number of columns. Pass nil to specify that a column should be
-// ignored.
-func (r *Row) Columns(ptrs ...interface{}) error {
- if len(ptrs) != len(r.vals) {
- return errNumOfColValue(len(ptrs), r)
- }
- if len(r.vals) != len(r.fields) {
- return errFieldsMismatchVals(r)
- }
- for i, p := range ptrs {
- if p == nil {
- continue
- }
- if err := r.Column(i, p); err != nil {
- return err
- }
- }
- return nil
-}
-
-// errToStructArgType returns error for p not having the correct data type(pointer to Go struct) to
-// be the argument of Row.ToStruct.
-func errToStructArgType(p interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "ToStruct(): type %T is not a valid pointer to Go struct", p)
-}
-
-// ToStruct fetches the columns in a row into the fields of a struct.
-// The rules for mapping a row's columns into a struct's exported fields
-// are:
-//
-// 1. If a field has a `spanner: "column_name"` tag, then decode column
-// 'column_name' into the field. A special case is the `spanner: "-"`
-// tag, which instructs ToStruct to ignore the field during decoding.
-//
-// 2. Otherwise, if the name of a field matches the name of a column (ignoring case),
-// decode the column into the field.
-//
-// 3. The number of columns in the row must match the number of exported fields in the struct.
-// There must be exactly one match for each column in the row. The method will return an error
-// if a column in the row cannot be assigned to a field in the struct.
-//
-// The fields of the destination struct can be of any type that is acceptable
-// to spanner.Row.Column.
-//
-// Slice and pointer fields will be set to nil if the source column is NULL, and a
-// non-nil value if the column is not NULL. To decode NULL values of other types, use
-// one of the spanner.NullXXX types as the type of the destination field.
-//
-// If ToStruct returns an error, the contents of p are undefined. Some fields may
-// have been successfully populated, while others were not; you should not use any of
-// the fields.
-func (r *Row) ToStruct(p interface{}) error {
- // Check if p is a pointer to a struct
- if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
- return errToStructArgType(p)
- }
- if len(r.vals) != len(r.fields) {
- return errFieldsMismatchVals(r)
- }
- // Call decodeStruct directly to decode the row as a typed proto.ListValue.
- return decodeStruct(
- &sppb.StructType{Fields: r.fields},
- &proto3.ListValue{Values: r.vals},
- p,
- false,
- )
-}
-
-// ToStructLenient fetches the columns in a row into the fields of a struct.
-// The rules for mapping a row's columns into a struct's exported fields
-// are:
-//
-// 1. If a field has a `spanner: "column_name"` tag, then decode column
-// 'column_name' into the field. A special case is the `spanner: "-"`
-// tag, which instructs ToStruct to ignore the field during decoding.
-//
-// 2. Otherwise, if the name of a field matches the name of a column (ignoring case),
-// decode the column into the field.
-//
-// 3. The number of columns in the row and exported fields in the struct do not need to match.
-// Any field in the struct that cannot not be assigned a value from the row is assigned its default value.
-// Any column in the row that does not have a corresponding field in the struct is ignored.
-//
-// The fields of the destination struct can be of any type that is acceptable
-// to spanner.Row.Column.
-//
-// Slice and pointer fields will be set to nil if the source column is NULL, and a
-// non-nil value if the column is not NULL. To decode NULL values of other types, use
-// one of the spanner.NullXXX types as the type of the destination field.
-//
-// If ToStructLenient returns an error, the contents of p are undefined. Some fields may
-// have been successfully populated, while others were not; you should not use any of
-// the fields.
-func (r *Row) ToStructLenient(p interface{}) error {
- // Check if p is a pointer to a struct
- if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
- return errToStructArgType(p)
- }
- if len(r.vals) != len(r.fields) {
- return errFieldsMismatchVals(r)
- }
- // Call decodeStruct directly to decode the row as a typed proto.ListValue.
- return decodeStruct(
- &sppb.StructType{Fields: r.fields},
- &proto3.ListValue{Values: r.vals},
- p,
- true,
- )
-}
-
-// SelectAll iterates all rows to the end. After iterating it closes the rows
-// and propagates any errors that could pop up with destination slice partially filled.
-// It expects that destination should be a slice. For each row, it scans data and appends it to the destination slice.
-// SelectAll supports both types of slices: slice of pointers and slice of structs or primitives by value,
-// for example:
-//
-// type Singer struct {
-// ID string
-// Name string
-// }
-//
-// var singersByPtr []*Singer
-// var singersByValue []Singer
-//
-// Both singersByPtr and singersByValue are valid destinations for SelectAll function.
-//
-// Add the option `spanner.WithLenient()` to instruct SelectAll to ignore additional columns in the rows that are not present in the destination struct.
-// example:
-//
-// var singersByPtr []*Singer
-// err := spanner.SelectAll(row, &singersByPtr, spanner.WithLenient())
-func SelectAll(rows rowIterator, destination interface{}, options ...DecodeOptions) error {
- if rows == nil {
- return fmt.Errorf("rows is nil")
- }
- if destination == nil {
- return fmt.Errorf("destination is nil")
- }
- dstVal := reflect.ValueOf(destination)
- if !dstVal.IsValid() || (dstVal.Kind() == reflect.Ptr && dstVal.IsNil()) {
- return errNilSlicePointer()
- }
- if dstVal.Kind() != reflect.Ptr {
- return errNotASlicePointer()
- }
- dstVal = dstVal.Elem()
- dstType := dstVal.Type()
- if k := dstType.Kind(); k != reflect.Slice {
- return errNotASlicePointer()
- }
-
- itemType := dstType.Elem()
- var itemByPtr bool
- // If it's a slice of pointers to structs,
- // we handle it the same way as it would be slice of struct by value
- // and dereference pointers to values,
- // because eventually we work with fields.
- // But if it's a slice of primitive type e.g. or []string or []*string,
- // we must leave and pass elements as is.
- if itemType.Kind() == reflect.Ptr {
- elementBaseTypeElem := itemType.Elem()
- if elementBaseTypeElem.Kind() == reflect.Struct {
- itemType = elementBaseTypeElem
- itemByPtr = true
- }
- }
- s := &decodeSetting{}
- for _, opt := range options {
- opt.Apply(s)
- }
-
- isPrimitive := itemType.Kind() != reflect.Struct
- var pointers []interface{}
- isFirstRow := true
- var err error
- return rows.Do(func(row *Row) error {
- sliceItem := reflect.New(itemType)
- if isFirstRow && !isPrimitive {
- defer func() {
- isFirstRow = false
- }()
- if pointers, err = structPointers(sliceItem.Elem(), row.fields, s.Lenient); err != nil {
- return err
- }
- } else if isPrimitive {
- if len(row.fields) > 1 && !s.Lenient {
- return errTooManyColumns()
- }
- pointers = []interface{}{sliceItem.Interface()}
- }
- if len(pointers) == 0 {
- return nil
- }
- err = row.Columns(pointers...)
- if err != nil {
- return err
- }
- if !isPrimitive {
- e := sliceItem.Elem()
- idx := 0
- for _, p := range pointers {
- if p == nil {
- continue
- }
- e.Field(idx).Set(reflect.ValueOf(p).Elem())
- idx++
- }
- }
- var elemVal reflect.Value
- if itemByPtr {
- if isFirstRow {
- // create a new pointer to the struct with all the values copied from sliceItem
- // because same underlying pointers array will be used for next rows
- elemVal = reflect.New(itemType)
- elemVal.Elem().Set(sliceItem.Elem())
- } else {
- elemVal = sliceItem
- }
- } else {
- elemVal = sliceItem.Elem()
- }
- dstVal.Set(reflect.Append(dstVal, elemVal))
- return nil
- })
-}
-
-func structPointers(sliceItem reflect.Value, cols []*sppb.StructType_Field, lenient bool) ([]interface{}, error) {
- pointers := make([]interface{}, 0, len(cols))
- fieldTag := make(map[string]reflect.Value, len(cols))
- initFieldTag(sliceItem, &fieldTag)
-
- for i, colName := range cols {
- if colName.Name == "" {
- return nil, errColNotFound(fmt.Sprintf("column %d", i))
- }
-
- var fieldVal reflect.Value
- if v, ok := fieldTag[strings.ToLower(colName.GetName())]; ok {
- fieldVal = v
- } else {
- if !lenient {
- return nil, errNoOrDupGoField(sliceItem, colName.GetName())
- }
- fieldVal = sliceItem.FieldByName(colName.GetName())
- }
- if !fieldVal.IsValid() || !fieldVal.CanSet() {
- // have to add if we found a column because Columns() requires
- // len(cols) arguments or it will error. This way we can scan to
- // a useless pointer
- pointers = append(pointers, nil)
- continue
- }
-
- pointers = append(pointers, fieldVal.Addr().Interface())
- }
- return pointers, nil
-}
-
-// Initialization the tags from struct.
-func initFieldTag(sliceItem reflect.Value, fieldTagMap *map[string]reflect.Value) {
- typ := sliceItem.Type()
-
- for i := 0; i < sliceItem.NumField(); i++ {
- fieldType := typ.Field(i)
- exported := (fieldType.PkgPath == "")
- // If a named field is unexported, ignore it. An anonymous
- // unexported field is processed, because it may contain
- // exported fields, which are visible.
- if !exported && !fieldType.Anonymous {
- continue
- }
- if fieldType.Type.Kind() == reflect.Struct {
- // found an embedded struct
- if fieldType.Anonymous {
- sliceItemOfAnonymous := sliceItem.Field(i)
- initFieldTag(sliceItemOfAnonymous, fieldTagMap)
- continue
- }
- }
- name, keep, _, _ := spannerTagParser(fieldType.Tag)
- if !keep {
- continue
- }
- if name == "" {
- name = fieldType.Name
- }
- (*fieldTagMap)[strings.ToLower(name)] = sliceItem.Field(i)
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/session.go b/vendor/cloud.google.com/go/spanner/session.go
deleted file mode 100644
index 3e587095a..000000000
--- a/vendor/cloud.google.com/go/spanner/session.go
+++ /dev/null
@@ -1,1971 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "container/heap"
- "container/list"
- "context"
- "fmt"
- "log"
- "math"
- "math/rand"
- "os"
- "runtime/debug"
- "strings"
- "sync"
- "time"
-
- "cloud.google.com/go/internal/trace"
- vkit "cloud.google.com/go/spanner/apiv1"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "cloud.google.com/go/spanner/internal"
- "go.opencensus.io/stats"
- "go.opencensus.io/tag"
- octrace "go.opencensus.io/trace"
- "go.opentelemetry.io/otel/attribute"
- "go.opentelemetry.io/otel/metric"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
-)
-
-const (
- healthCheckIntervalMins = 50
- multiplexSessionRefreshInterval = 7 * 24 * time.Hour
-)
-
-// ActionOnInactiveTransactionKind describes the kind of action taken when there are inactive transactions.
-type ActionOnInactiveTransactionKind int
-
-const (
- actionUnspecified ActionOnInactiveTransactionKind = iota
- // NoAction action does not perform any action on inactive transactions.
- NoAction
- // Warn action logs inactive transactions. Any inactive transaction gets logged only once.
- Warn
- // Close action closes inactive transactions without logging.
- Close
- // WarnAndClose action logs and closes the inactive transactions.
- WarnAndClose
-)
-
-// InactiveTransactionRemovalOptions has configurations for action on long-running transactions.
-type InactiveTransactionRemovalOptions struct {
- // ActionOnInactiveTransaction is the configuration to choose action for inactive transactions.
- // It can be one of Warn, Close, WarnAndClose.
- ActionOnInactiveTransaction ActionOnInactiveTransactionKind
- // long-running transactions will be cleaned up if utilisation is
- // greater than the below value.
- usedSessionsRatioThreshold float64
- // A transaction is considered to be idle if it has not been used for
- // a duration greater than the below value.
- idleTimeThreshold time.Duration
- // frequency for closing inactive transactions
- executionFrequency time.Duration
- // variable that keeps track of the last execution time when inactive transactions
- // were removed by the maintainer task.
- lastExecutionTime time.Time
-}
-
-// sessionHandle is an interface for transactions to access Cloud Spanner
-// sessions safely. It is generated by sessionPool.take().
-type sessionHandle struct {
- // mu guarantees that the inner session object is returned / destroyed only
- // once.
- mu sync.Mutex
- // session is a pointer to a session object. Transactions never need to
- // access it directly.
- session *session
- // client is the RPC channel to Cloud Spanner. It is set only once during session acquisition.
- client *vkit.Client
- // checkoutTime is the time the session was checked out of the pool.
- checkoutTime time.Time
- // lastUseTime is the time the session was last used after checked out of the pool.
- lastUseTime time.Time
- // trackedSessionHandle is the linked list node which links the session to
- // the list of tracked session handles. trackedSessionHandle is only set if
- // TrackSessionHandles has been enabled in the session pool configuration.
- trackedSessionHandle *list.Element
- // stack is the call stack of the goroutine that checked out the session
- // from the pool. This can be used to track down session leak problems.
- stack []byte
- // eligibleForLongRunning tells if the inner session is eligible to be long-running.
- eligibleForLongRunning bool
- // if the inner session object is long-running then the stack gets logged once.
- isSessionLeakLogged bool
-}
-
-// recycle gives the inner session object back to its home session pool. It is
-// safe to call recycle multiple times but only the first one would take effect.
-func (sh *sessionHandle) recycle() {
- sh.mu.Lock()
- if sh.session == nil {
- // sessionHandle has already been recycled.
- sh.mu.Unlock()
- return
- }
- p := sh.session.pool
- tracked := sh.trackedSessionHandle
- s := sh.session
- sh.session = nil
- sh.client = nil
- sh.trackedSessionHandle = nil
- sh.checkoutTime = time.Time{}
- sh.lastUseTime = time.Time{}
- sh.stack = nil
- sh.mu.Unlock()
- s.recycle()
- if tracked != nil {
- p.mu.Lock()
- p.trackedSessionHandles.Remove(tracked)
- p.mu.Unlock()
- }
-}
-
-// getID gets the Cloud Spanner session ID from the internal session object.
-// getID returns empty string if the sessionHandle is nil or the inner session
-// object has been released by recycle / destroy.
-func (sh *sessionHandle) getID() string {
- sh.mu.Lock()
- defer sh.mu.Unlock()
- if sh.session == nil {
- // sessionHandle has already been recycled/destroyed.
- return ""
- }
- return sh.session.getID()
-}
-
-// getClient gets the Cloud Spanner RPC client associated with the session ID
-// in sessionHandle.
-func (sh *sessionHandle) getClient() *vkit.Client {
- sh.mu.Lock()
- defer sh.mu.Unlock()
- if sh.session == nil {
- return nil
- }
- if sh.client != nil {
- // Use the gRPC connection from the session handle
- return sh.client
- }
- return sh.session.client
-}
-
-// getMetadata returns the metadata associated with the session in sessionHandle.
-func (sh *sessionHandle) getMetadata() metadata.MD {
- sh.mu.Lock()
- defer sh.mu.Unlock()
- if sh.session == nil {
- return nil
- }
- return sh.session.md
-}
-
-// getTransactionID returns the transaction id in the session if available.
-func (sh *sessionHandle) getTransactionID() transactionID {
- sh.mu.Lock()
- defer sh.mu.Unlock()
- if sh.session == nil {
- return nil
- }
- return sh.session.tx
-}
-
-// destroy destroys the inner session object. It is safe to call destroy
-// multiple times and only the first call would attempt to
-// destroy the inner session object.
-func (sh *sessionHandle) destroy() {
- sh.mu.Lock()
- s := sh.session
- if s == nil {
- // sessionHandle has already been recycled.
- sh.mu.Unlock()
- return
- }
- tracked := sh.trackedSessionHandle
- sh.session = nil
- sh.client = nil
- sh.trackedSessionHandle = nil
- sh.checkoutTime = time.Time{}
- sh.lastUseTime = time.Time{}
- sh.stack = nil
- sh.mu.Unlock()
-
- if tracked != nil {
- p := s.pool
- p.mu.Lock()
- p.trackedSessionHandles.Remove(tracked)
- p.mu.Unlock()
- }
- // since sessionHandle is always used by Transactions we can safely destroy the session with wasInUse=true
- s.destroy(false, true)
-}
-
-func (sh *sessionHandle) updateLastUseTime() {
- sh.mu.Lock()
- defer sh.mu.Unlock()
- if sh.session != nil {
- sh.lastUseTime = time.Now()
- }
-}
-
-// session wraps a Cloud Spanner session ID through which transactions are
-// created and executed.
-type session struct {
- // client is the RPC channel to Cloud Spanner. It is set only once during
- // session's creation.
- client *vkit.Client
- // id is the unique id of the session in Cloud Spanner. It is set only once
- // during session's creation.
- id string
- // pool is the session's home session pool where it was created. It is set
- // only once during session's creation.
- pool *sessionPool
- // createTime is the timestamp of the session's creation. It is set only
- // once during session's creation.
- createTime time.Time
- // logger is the logger configured for the Spanner client that created the
- // session. If nil, logging will be directed to the standard logger.
- logger *log.Logger
-
- // mu protects the following fields from concurrent access: both
- // healthcheck workers and transactions can modify them.
- mu sync.Mutex
- // valid marks the validity of a session.
- valid bool
- // hcIndex is the index of the session inside the global healthcheck queue.
- // If hcIndex < 0, session has been unregistered from the queue.
- hcIndex int
- // idleList is the linkedlist node which links the session to its home
- // session pool's idle list. If idleList == nil, the
- // session is not in idle list.
- idleList *list.Element
- // nextCheck is the timestamp of next scheduled healthcheck of the session.
- // It is maintained by the global health checker.
- nextCheck time.Time
- // checkingHelath is true if currently this session is being processed by
- // health checker. Must be modified under health checker lock.
- checkingHealth bool
- // md is the Metadata to be sent with each request.
- md metadata.MD
- // tx contains the transaction id if the session has been prepared for
- // write.
- tx transactionID
- // firstHCDone indicates whether the first health check is done or not.
- firstHCDone bool
- // isMultiplexed is true if the session is multiplexed.
- isMultiplexed bool
-}
-
-// isValid returns true if the session is still valid for use.
-func (s *session) isValid() bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.valid
-}
-
-// isWritePrepared returns true if the session is prepared for write.
-func (s *session) isWritePrepared() bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.tx != nil
-}
-
-// String implements fmt.Stringer for session.
-func (s *session) String() string {
- s.mu.Lock()
- defer s.mu.Unlock()
- return fmt.Sprintf("<id=%v, hcIdx=%v, idleList=%p, valid=%v, create=%v, nextcheck=%v>",
- s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck)
-}
-
-// ping verifies if the session is still alive in Cloud Spanner.
-func (s *session) ping() error {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
-
- // Start parent span that doesn't record.
- _, span := octrace.StartSpan(ctx, "cloud.google.com/go/spanner.ping", octrace.WithSampler(octrace.NeverSample()))
- defer span.End()
-
- // s.getID is safe even when s is invalid.
- _, err := s.client.ExecuteSql(contextWithOutgoingMetadata(ctx, s.md, true), &sppb.ExecuteSqlRequest{
- Session: s.getID(),
- Sql: "SELECT 1",
- })
- return err
-}
-
-// setHcIndex atomically sets the session's index in the healthcheck queue and
-// returns the old index.
-func (s *session) setHcIndex(i int) int {
- s.mu.Lock()
- defer s.mu.Unlock()
- oi := s.hcIndex
- s.hcIndex = i
- return oi
-}
-
-// setIdleList atomically sets the session's idle list link and returns the old
-// link.
-func (s *session) setIdleList(le *list.Element) *list.Element {
- s.mu.Lock()
- defer s.mu.Unlock()
- old := s.idleList
- s.idleList = le
- return old
-}
-
-// invalidate marks a session as invalid and returns the old validity.
-func (s *session) invalidate() bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- ov := s.valid
- s.valid = false
- return ov
-}
-
-// setNextCheck sets the timestamp for next healthcheck on the session.
-func (s *session) setNextCheck(t time.Time) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.nextCheck = t
-}
-
-// setTransactionID sets the transaction id in the session
-func (s *session) setTransactionID(tx transactionID) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.tx = tx
-}
-
-// getID returns the session ID which uniquely identifies the session in Cloud
-// Spanner.
-func (s *session) getID() string {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.id
-}
-
-// getHcIndex returns the session's index into the global healthcheck priority
-// queue.
-func (s *session) getHcIndex() int {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.hcIndex
-}
-
-// getIdleList returns the session's link in its home session pool's idle list.
-func (s *session) getIdleList() *list.Element {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.idleList
-}
-
-// getNextCheck returns the timestamp for next healthcheck on the session.
-func (s *session) getNextCheck() time.Time {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.nextCheck
-}
-
-// recycle turns the session back to its home session pool.
-func (s *session) recycle() {
- s.setTransactionID(nil)
- s.pool.mu.Lock()
- if s.isMultiplexed {
- s.pool.decNumMultiplexedInUseLocked(context.Background())
- s.pool.mu.Unlock()
- return
- }
- if !s.pool.recycleLocked(s) {
- // s is rejected by its home session pool because it expired and the
- // session pool currently has enough open sessions.
- s.pool.mu.Unlock()
- s.destroy(false, true)
- s.pool.mu.Lock()
- }
- s.pool.decNumInUseLocked(context.Background())
- s.pool.mu.Unlock()
-}
-
-// destroy removes the session from its home session pool, healthcheck queue
-// and Cloud Spanner service.
-func (s *session) destroy(isExpire, wasInUse bool) bool {
- ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
- defer cancel()
- return s.destroyWithContext(ctx, isExpire, wasInUse)
-}
-
-func (s *session) destroyWithContext(ctx context.Context, isExpire, wasInUse bool) bool {
- // Remove s from session pool.
- if !s.pool.remove(s, isExpire, wasInUse) {
- return false
- }
- // Unregister s from healthcheck queue.
- s.pool.hc.unregister(s)
- return true
-}
-
-func (s *session) delete(ctx context.Context) {
- // Ignore the error because even if we fail to explicitly destroy the
- // session, it will be eventually garbage collected by Cloud Spanner.
- err := s.client.DeleteSession(contextWithOutgoingMetadata(ctx, s.md, true), &sppb.DeleteSessionRequest{Name: s.getID()})
- // Do not log DeadlineExceeded errors when deleting sessions, as these do
- // not indicate anything the user can or should act upon.
- if err != nil && ErrCode(err) != codes.DeadlineExceeded {
- logf(s.logger, "Failed to delete session %v. Error: %v", s.getID(), err)
- }
-}
-
-// SessionPoolConfig stores configurations of a session pool.
-type SessionPoolConfig struct {
- // MaxOpened is the maximum number of opened sessions allowed by the session
- // pool. If the client tries to open a session and there are already
- // MaxOpened sessions, it will block until one becomes available or the
- // context passed to the client method is canceled or times out.
- //
- // Defaults to NumChannels * 100.
- MaxOpened uint64
-
- // MinOpened is the minimum number of opened sessions that the session pool
- // tries to maintain. Session pool won't continue to expire sessions if
- // number of opened connections drops below MinOpened. However, if a session
- // is found to be broken, it will still be evicted from the session pool,
- // therefore it is posssible that the number of opened sessions drops below
- // MinOpened.
- //
- // Defaults to 100.
- MinOpened uint64
-
- // MaxIdle is the maximum number of idle sessions that are allowed in the
- // session pool.
- //
- // Defaults to 0.
- MaxIdle uint64
-
- // MaxBurst is the maximum number of concurrent session creation requests.
- //
- // Deprecated: MaxBurst exists for historical compatibility and should not
- // be used. MaxBurst was used to limit the number of sessions that the
- // session pool could create within a time frame. This was an early safety
- // valve to prevent a client from overwhelming the backend if a large number
- // of sessions was suddenly needed. The session pool would then pause the
- // creation of sessions for a while. Such a pause is no longer needed and
- // the implementation has been removed from the pool.
- //
- // Defaults to 10.
- MaxBurst uint64
-
- // incStep is the number of sessions to create in one batch when at least
- // one more session is needed.
- //
- // Defaults to 25.
- incStep uint64
-
- // WriteSessions is the fraction of sessions we try to keep prepared for
- // write.
- //
- // Deprecated: The session pool no longer prepares a fraction of the sessions with a read/write transaction.
- // This setting therefore does not have any meaning anymore, and may be removed in the future.
- //
- // Defaults to 0.2.
- WriteSessions float64
-
- // HealthCheckWorkers is number of workers used by health checker for this
- // pool.
- //
- // Defaults to 10.
- HealthCheckWorkers int
-
- // HealthCheckInterval is how often the health checker pings a session.
- //
- // Defaults to 50m.
- HealthCheckInterval time.Duration
-
- // MultiplexSessionCheckInterval is the interval at which the multiplexed session is checked whether it needs to be refreshed.
- //
- // Defaults to 10 mins.
- MultiplexSessionCheckInterval time.Duration
-
- // TrackSessionHandles determines whether the session pool will keep track
- // of the stacktrace of the goroutines that take sessions from the pool.
- // This setting can be used to track down session leak problems.
- //
- // Defaults to false.
- TrackSessionHandles bool
-
- // healthCheckSampleInterval is how often the health checker samples live
- // session (for use in maintaining session pool size).
- //
- // Defaults to 1m.
- healthCheckSampleInterval time.Duration
-
- // sessionLabels for the sessions created in the session pool.
- sessionLabels map[string]string
-
- InactiveTransactionRemovalOptions
-}
-
-// DefaultSessionPoolConfig is the default configuration for the session pool
-// that will be used for a Spanner client, unless the user supplies a specific
-// session pool config.
-var DefaultSessionPoolConfig = SessionPoolConfig{
- MinOpened: 100,
- MaxOpened: numChannels * 100,
- MaxBurst: 10,
- incStep: 25,
- WriteSessions: 0.2,
- HealthCheckWorkers: 10,
- HealthCheckInterval: healthCheckIntervalMins * time.Minute,
- InactiveTransactionRemovalOptions: InactiveTransactionRemovalOptions{
- ActionOnInactiveTransaction: Warn,
- executionFrequency: 2 * time.Minute,
- idleTimeThreshold: 60 * time.Minute,
- usedSessionsRatioThreshold: 0.95,
- },
-}
-
-// errMinOpenedGTMapOpened returns error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set.
-func errMinOpenedGTMaxOpened(maxOpened, minOpened uint64) error {
- return spannerErrorf(codes.InvalidArgument,
- "require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %d and %d", maxOpened, minOpened)
-}
-
-// errWriteFractionOutOfRange returns error for
-// SessionPoolConfig.WriteFraction < 0 or SessionPoolConfig.WriteFraction > 1
-func errWriteFractionOutOfRange(writeFraction float64) error {
- return spannerErrorf(codes.InvalidArgument,
- "require SessionPoolConfig.WriteSessions >= 0.0 && SessionPoolConfig.WriteSessions <= 1.0, got %.2f", writeFraction)
-}
-
-// errHealthCheckWorkersNegative returns error for
-// SessionPoolConfig.HealthCheckWorkers < 0
-func errHealthCheckWorkersNegative(workers int) error {
- return spannerErrorf(codes.InvalidArgument,
- "require SessionPoolConfig.HealthCheckWorkers >= 0, got %d", workers)
-}
-
-// errHealthCheckIntervalNegative returns error for
-// SessionPoolConfig.HealthCheckInterval < 0
-func errHealthCheckIntervalNegative(interval time.Duration) error {
- return spannerErrorf(codes.InvalidArgument,
- "require SessionPoolConfig.HealthCheckInterval >= 0, got %v", interval)
-}
-
-// validate verifies that the SessionPoolConfig is good for use.
-func (spc *SessionPoolConfig) validate() error {
- if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 {
- return errMinOpenedGTMaxOpened(spc.MaxOpened, spc.MinOpened)
- }
- if spc.HealthCheckWorkers < 0 {
- return errHealthCheckWorkersNegative(spc.HealthCheckWorkers)
- }
- if spc.HealthCheckInterval < 0 {
- return errHealthCheckIntervalNegative(spc.HealthCheckInterval)
- }
- return nil
-}
-
-type muxSessionCreateRequest struct {
- ctx context.Context
- force bool
-}
-
-// sessionPool creates and caches Cloud Spanner sessions.
-type sessionPool struct {
- // mu protects sessionPool from concurrent access.
- mu sync.Mutex
- // valid marks the validity of the session pool.
- valid bool
- // sc is used to create the sessions for the pool.
- sc *sessionClient
- // trackedSessionHandles contains all sessions handles that have been
- // checked out of the pool. The list is only filled if TrackSessionHandles
- // has been enabled.
- trackedSessionHandles list.List
- // idleList caches idle session IDs. Session IDs in this list can be
- // allocated for use.
- idleList list.List
- // multiplexSessionClientCounter is the counter for the multiplexed session client.
- multiplexSessionClientCounter int
- // clientPool is a pool of Cloud Spanner grpc clients.
- clientPool []*vkit.Client
- // multiplexedSession contains the multiplexed session
- multiplexedSession *session
- // mayGetSession is for broadcasting that session retrival/creation may
- // proceed.
- mayGetSession chan struct{}
- // multiplexedSessionReq is the ongoing multiplexed session creation request (if any).
- multiplexedSessionReq chan muxSessionCreateRequest
- // mayGetMultiplexedSession is for broadcasting that multiplexed session retrieval is possible.
- mayGetMultiplexedSession chan bool
- // sessionCreationError is the last error that occurred during session
- // creation and is propagated to any waiters waiting for a session.
- sessionCreationError error
- // multiplexedSessionCreationError is the error that occurred during multiplexed session
- // creation for the first time and is propagated to any waiters waiting for a session.
- multiplexedSessionCreationError error
- // numOpened is the total number of open sessions from the session pool.
- numOpened uint64
- // createReqs is the number of ongoing session creation requests.
- createReqs uint64
- // numWaiters is the number of processes waiting for a session to
- // become available.
- numWaiters uint64
- // disableBackgroundPrepareSessions indicates that the BeginTransaction
- // call for a read/write transaction failed with a permanent error, such as
- // PermissionDenied or `Database not found`. Further background calls to
- // prepare sessions will be disabled.
- disableBackgroundPrepareSessions bool
- // configuration of the session pool.
- SessionPoolConfig
- // hc is the health checker
- hc *healthChecker
- // rand is a separately sourced random generator.
- rand *rand.Rand
- // numInUse is the number of sessions that are currently in use (checked out
- // from the session pool).
- numInUse uint64
- // maxNumInUse is the maximum number of sessions in use concurrently in the
- // current 10 minute interval.
- maxNumInUse uint64
- // lastResetTime is the start time of the window for recording maxNumInUse.
- lastResetTime time.Time
- // numSessions is the number of sessions that are idle for read/write.
- numSessions uint64
-
- // mw is the maintenance window containing statistics for the max number of
- // sessions checked out of the pool during the last 10 minutes.
- mw *maintenanceWindow
-
- // tagMap is a map of all tags that are associated with the emitted metrics.
- tagMap *tag.Map
-
- // indicates the number of leaked sessions removed from the session pool.
- // This is valid only when ActionOnInactiveTransaction is WarnAndClose or ActionOnInactiveTransaction is Close in InactiveTransactionRemovalOptions.
- numOfLeakedSessionsRemoved uint64
-
- otConfig *openTelemetryConfig
-
- // enableMultiplexSession is a flag to enable multiplexed session.
- enableMultiplexSession bool
-}
-
-// newSessionPool creates a new session pool.
-func newSessionPool(sc *sessionClient, config SessionPoolConfig) (*sessionPool, error) {
- if err := config.validate(); err != nil {
- return nil, err
- }
- if config.HealthCheckWorkers == 0 {
- // With 10 workers and assuming average latency of 5ms for
- // BeginTransaction, we will be able to prepare 2000 tx/sec in advance.
- // If the rate of takeWriteSession is more than that, it will degrade to
- // doing BeginTransaction inline.
- //
- // TODO: consider resizing the worker pool dynamically according to the load.
- config.HealthCheckWorkers = 10
- }
- if config.HealthCheckInterval == 0 {
- config.HealthCheckInterval = healthCheckIntervalMins * time.Minute
- }
- if config.healthCheckSampleInterval == 0 {
- config.healthCheckSampleInterval = time.Minute
- }
- if config.ActionOnInactiveTransaction == actionUnspecified {
- config.ActionOnInactiveTransaction = DefaultSessionPoolConfig.ActionOnInactiveTransaction
- }
- if config.idleTimeThreshold == 0 {
- config.idleTimeThreshold = DefaultSessionPoolConfig.idleTimeThreshold
- }
- if config.executionFrequency == 0 {
- config.executionFrequency = DefaultSessionPoolConfig.executionFrequency
- }
- if config.usedSessionsRatioThreshold == 0 {
- config.usedSessionsRatioThreshold = DefaultSessionPoolConfig.usedSessionsRatioThreshold
- }
- if config.MultiplexSessionCheckInterval == 0 {
- config.MultiplexSessionCheckInterval = 10 * time.Minute
- }
- isMultiplexed := strings.ToLower(os.Getenv("GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS"))
- if isMultiplexed != "" && isMultiplexed != "true" && isMultiplexed != "false" {
- return nil, spannerErrorf(codes.InvalidArgument, "GOOGLE_CLOUD_SPANNER_MULTIPLEXED_SESSIONS must be either true or false")
- }
- pool := &sessionPool{
- sc: sc,
- valid: true,
- mayGetSession: make(chan struct{}),
- mayGetMultiplexedSession: make(chan bool),
- multiplexedSessionReq: make(chan muxSessionCreateRequest),
- SessionPoolConfig: config,
- mw: newMaintenanceWindow(config.MaxOpened),
- rand: rand.New(rand.NewSource(time.Now().UnixNano())),
- otConfig: sc.otConfig,
- enableMultiplexSession: isMultiplexed == "true",
- }
-
- _, instance, database, err := parseDatabaseName(sc.database)
- if err != nil {
- return nil, err
- }
- // Errors should not prevent initializing the session pool.
- ctx, err := tag.New(context.Background(),
- tag.Upsert(tagKeyClientID, sc.id),
- tag.Upsert(tagKeyDatabase, database),
- tag.Upsert(tagKeyInstance, instance),
- tag.Upsert(tagKeyLibVersion, internal.Version),
- )
- if err != nil {
- logf(pool.sc.logger, "Failed to create tag map, error: %v", err)
- }
- pool.tagMap = tag.FromContext(ctx)
-
-	// On GCE VM, within the same region a healthcheck ping takes on average
-	// 10ms to finish, given a 5 minutes interval and 10 healthcheck workers, a
-	// healthChecker can effectively maintain
-	// 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions.
- pool.hc = newHealthChecker(config.HealthCheckInterval, config.MultiplexSessionCheckInterval, config.HealthCheckWorkers, config.healthCheckSampleInterval, pool)
-
- // First initialize the pool before we indicate that the healthchecker is
- // ready. This prevents the maintainer from starting before the pool has
- // been initialized, which means that we guarantee that the initial
- // sessions are created using BatchCreateSessions.
- if config.MinOpened > 0 {
- numSessions := minUint64(config.MinOpened, math.MaxInt32)
- if err := pool.initPool(numSessions); err != nil {
- return nil, err
- }
- }
- if pool.enableMultiplexSession {
- go pool.createMultiplexedSession()
- ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
- pool.multiplexedSessionReq <- muxSessionCreateRequest{force: true, ctx: ctx}
- // listen for the session to be created
- go func() {
- select {
- case <-ctx.Done():
- cancel()
- return
- // wait for the session to be created
- case <-pool.mayGetMultiplexedSession:
- }
- return
- }()
- }
- pool.recordStat(context.Background(), MaxAllowedSessionsCount, int64(config.MaxOpened))
-
- err = registerSessionPoolOTMetrics(pool)
- if err != nil {
- logf(pool.sc.logger, "Error when registering session pool metrics in OpenTelemetry, error: %v", err)
- }
-
- close(pool.hc.ready)
- return pool, nil
-}
-
-func (p *sessionPool) recordStat(ctx context.Context, m *stats.Int64Measure, n int64, tags ...tag.Tag) {
- ctx = tag.NewContext(ctx, p.tagMap)
- mutators := make([]tag.Mutator, len(tags))
- for i, t := range tags {
- mutators[i] = tag.Upsert(t.Key, t.Value)
- }
- ctx, err := tag.New(ctx, mutators...)
- if err != nil {
- logf(p.sc.logger, "Failed to tag metrics, error: %v", err)
- }
- recordStat(ctx, m, n)
-}
-
-type recordOTStatOption struct {
- attr []attribute.KeyValue
-}
-
-func (p *sessionPool) recordOTStat(ctx context.Context, m metric.Int64Counter, val int64, option recordOTStatOption) {
- if m != nil {
- attrs := p.otConfig.attributeMap
- if len(option.attr) > 0 {
- attrs = option.attr
- }
- m.Add(ctx, val, metric.WithAttributes(attrs...))
- }
-}
-
-func (p *sessionPool) getRatioOfSessionsInUseLocked() float64 {
- maxSessions := p.MaxOpened
- if maxSessions == 0 {
- return 0
- }
- return float64(p.numInUse) / float64(maxSessions)
-}
-
-// gets sessions which are unexpectedly long-running.
-func (p *sessionPool) getLongRunningSessionsLocked() []*sessionHandle {
- usedSessionsRatio := p.getRatioOfSessionsInUseLocked()
- var longRunningSessions []*sessionHandle
- if usedSessionsRatio > p.usedSessionsRatioThreshold {
- element := p.trackedSessionHandles.Front()
- for element != nil {
- sh := element.Value.(*sessionHandle)
- sh.mu.Lock()
- if sh.session == nil {
- // sessionHandle has already been recycled/destroyed.
- sh.mu.Unlock()
- element = element.Next()
- continue
- }
- diff := time.Since(sh.lastUseTime)
- if !sh.eligibleForLongRunning && diff.Seconds() >= p.idleTimeThreshold.Seconds() {
- if (p.ActionOnInactiveTransaction == Warn || p.ActionOnInactiveTransaction == WarnAndClose) && !sh.isSessionLeakLogged {
- if p.ActionOnInactiveTransaction == Warn {
- if sh.stack != nil {
- logf(p.sc.logger, "session %s checked out of pool at %s is long running due to possible session leak for goroutine: \n%s", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339), sh.stack)
- } else {
- logf(p.sc.logger, "session %s checked out of pool at %s is long running due to possible session leak for goroutine: \nEnable SessionPoolConfig.TrackSessionHandles to get stack trace associated with the session", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339))
- }
- sh.isSessionLeakLogged = true
- } else if p.ActionOnInactiveTransaction == WarnAndClose {
- if sh.stack != nil {
- logf(p.sc.logger, "session %s checked out of pool at %s is long running and will be removed due to possible session leak for goroutine: \n%s", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339), sh.stack)
- } else {
- logf(p.sc.logger, "session %s checked out of pool at %s is long running and will be removed due to possible session leak for goroutine: \nEnable SessionPoolConfig.TrackSessionHandles to get stack trace associated with the session", sh.session.getID(), sh.checkoutTime.Format(time.RFC3339))
- }
- }
- }
- if p.ActionOnInactiveTransaction == WarnAndClose || p.ActionOnInactiveTransaction == Close {
- longRunningSessions = append(longRunningSessions, sh)
- }
- }
- sh.mu.Unlock()
- element = element.Next()
- }
- }
- return longRunningSessions
-}
-
-// removes or logs sessions that are unexpectedly long-running.
-func (p *sessionPool) removeLongRunningSessions() {
- p.mu.Lock()
- longRunningSessions := p.getLongRunningSessionsLocked()
- p.mu.Unlock()
-
- // destroy long-running sessions
- if p.ActionOnInactiveTransaction == WarnAndClose || p.ActionOnInactiveTransaction == Close {
- var leakedSessionsRemovedCount uint64
- for _, sh := range longRunningSessions {
- // removes inner session out of the pool to reduce the probability of two processes trying
- // to use the same session at the same time.
- sh.destroy()
- leakedSessionsRemovedCount++
- }
- p.mu.Lock()
- p.numOfLeakedSessionsRemoved += leakedSessionsRemovedCount
- p.mu.Unlock()
- }
-}
-
-func (p *sessionPool) initPool(numSessions uint64) error {
- p.mu.Lock()
- defer p.mu.Unlock()
- return p.growPoolLocked(numSessions, true)
-}
-
-func (p *sessionPool) growPoolLocked(numSessions uint64, distributeOverChannels bool) error {
- // Take budget before the actual session creation.
- numSessions = minUint64(numSessions, math.MaxInt32)
- p.numOpened += uint64(numSessions)
- p.recordStat(context.Background(), OpenSessionCount, int64(p.numOpened))
- p.createReqs += uint64(numSessions)
- // Asynchronously create a batch of sessions for the pool.
- return p.sc.batchCreateSessions(int32(numSessions), distributeOverChannels, p)
-}
-
-func (p *sessionPool) createMultiplexedSession() {
- for c := range p.multiplexedSessionReq {
- p.mu.Lock()
- sess := p.multiplexedSession
- p.mu.Unlock()
- if c.force || sess == nil {
- p.mu.Lock()
- p.sc.mu.Lock()
- client, err := p.sc.nextClient()
- p.sc.mu.Unlock()
- p.mu.Unlock()
- if err != nil {
- // If we can't get a client, we can't create a session.
- p.mu.Lock()
- p.multiplexedSessionCreationError = err
- p.mu.Unlock()
- p.mayGetMultiplexedSession <- true
- continue
- }
- p.sc.executeCreateMultiplexedSession(c.ctx, client, p.sc.md, p)
- continue
- }
- select {
- case p.mayGetMultiplexedSession <- true:
- case <-c.ctx.Done():
- return
- }
- }
-}
-
-// sessionReady is executed by the SessionClient when a session has been
-// created and is ready to use. This method will add the new session to the
-// pool and decrease the number of sessions that is being created.
-func (p *sessionPool) sessionReady(ctx context.Context, s *session) {
- p.mu.Lock()
- defer p.mu.Unlock()
- // Clear any session creation error.
- if s.isMultiplexed {
- s.pool = p
- p.multiplexedSession = s
- p.multiplexedSessionCreationError = nil
- p.recordStat(context.Background(), OpenSessionCount, int64(1), tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
- p.recordStat(context.Background(), SessionsCount, 1, tagNumSessions, tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
- // either notify the waiting goroutine or skip if no one is waiting
- select {
- case p.mayGetMultiplexedSession <- true:
- case <-ctx.Done():
- return
- }
- return
- }
- p.sessionCreationError = nil
- // Set this pool as the home pool of the session and register it with the
- // health checker.
- s.pool = p
- p.hc.register(s)
- p.createReqs--
-	// Insert the session at a random position in the pool to prevent all
-	// sessions affiliated with a channel from being placed sequentially in
-	// the pool.
- if p.idleList.Len() > 0 {
- pos := rand.Intn(p.idleList.Len())
- before := p.idleList.Front()
- for i := 0; i < pos; i++ {
- before = before.Next()
- }
- s.setIdleList(p.idleList.InsertBefore(s, before))
- } else {
- s.setIdleList(p.idleList.PushBack(s))
- }
- p.incNumSessionsLocked(context.Background())
- // Notify other waiters blocking on session creation.
- close(p.mayGetSession)
- p.mayGetSession = make(chan struct{})
-}
-
-// sessionCreationFailed is called by the SessionClient when the creation of one
-// or more requested sessions finished with an error. sessionCreationFailed will
-// decrease the number of sessions being created and notify any waiters that
-// the session creation failed.
-func (p *sessionPool) sessionCreationFailed(ctx context.Context, err error, numSessions int32, isMultiplexed bool) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if isMultiplexed {
- // Ignore the error if multiplexed session already present
- if p.multiplexedSession != nil {
- p.multiplexedSessionCreationError = nil
- select {
- case p.mayGetMultiplexedSession <- true:
- case <-ctx.Done():
- return
- }
- return
- }
- p.recordStat(context.Background(), OpenSessionCount, int64(0), tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
- p.multiplexedSessionCreationError = err
- select {
- case p.mayGetMultiplexedSession <- true:
- case <-ctx.Done():
- return
- }
- return
- }
- p.createReqs -= uint64(numSessions)
- p.numOpened -= uint64(numSessions)
- p.recordStat(context.Background(), OpenSessionCount, int64(p.numOpened), tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- // Notify other waiters blocking on session creation.
- p.sessionCreationError = err
- close(p.mayGetSession)
- p.mayGetSession = make(chan struct{})
-}
-
-// isValid checks if the session pool is still valid.
-func (p *sessionPool) isValid() bool {
- if p == nil {
- return false
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- return p.valid
-}
-
-// close marks the session pool as closed and deletes all sessions in parallel.
-// Any errors that are returned by the Delete RPC are logged but otherwise
-// ignored, except for DeadlineExceeded errors, which are ignored and not
-// logged.
-func (p *sessionPool) close(ctx context.Context) {
- if p == nil {
- return
- }
- p.mu.Lock()
- if !p.valid {
- p.mu.Unlock()
- return
- }
- p.valid = false
- if p.otConfig != nil && p.otConfig.otMetricRegistration != nil {
- err := p.otConfig.otMetricRegistration.Unregister()
- if err != nil {
- logf(p.sc.logger, "Failed to unregister callback from the OpenTelemetry meter, error : %v", err)
- }
- }
- p.mu.Unlock()
- p.hc.close()
- // destroy all the sessions
- p.hc.mu.Lock()
- allSessions := make([]*session, len(p.hc.queue.sessions))
- copy(allSessions, p.hc.queue.sessions)
- p.hc.mu.Unlock()
- wg := sync.WaitGroup{}
- for _, s := range allSessions {
- wg.Add(1)
- go closeSession(ctx, s, &wg)
- }
- wg.Wait()
-}
-
-func closeSession(ctx context.Context, s *session, wg *sync.WaitGroup) {
- defer wg.Done()
- s.destroyWithContext(ctx, false, false)
-}
-
-// errInvalidSessionPool is the error for using an invalid session pool.
-var errInvalidSessionPool = spannerErrorf(codes.InvalidArgument, "invalid session pool")
-
-// errGetSessionTimeout returns error for context timeout during
-// sessionPool.take().
-var errGetSessionTimeout = spannerErrorf(codes.Canceled, "timeout / context canceled during getting session")
-
-// newSessionHandle creates a new session handle for the given session for this
-// session pool. The session handle will also hold a copy of the current call
-// stack if the session pool has been configured to track the call stacks of
-// sessions being checked out of the pool.
-func (p *sessionPool) newSessionHandle(s *session) (sh *sessionHandle) {
- sh = &sessionHandle{session: s, checkoutTime: time.Now(), lastUseTime: time.Now()}
- if s.isMultiplexed {
- p.mu.Lock()
- sh.client = p.getRoundRobinClient()
- p.mu.Unlock()
- return sh
- }
- if p.TrackSessionHandles || p.ActionOnInactiveTransaction == Warn || p.ActionOnInactiveTransaction == WarnAndClose || p.ActionOnInactiveTransaction == Close {
- p.mu.Lock()
- sh.trackedSessionHandle = p.trackedSessionHandles.PushBack(sh)
- if p.TrackSessionHandles {
- sh.stack = debug.Stack()
- }
- p.mu.Unlock()
- }
- return sh
-}
-
-func (p *sessionPool) getRoundRobinClient() *vkit.Client {
- p.sc.mu.Lock()
- defer func() {
- p.multiplexSessionClientCounter++
- p.sc.mu.Unlock()
- }()
- if len(p.clientPool) == 0 {
- p.clientPool = make([]*vkit.Client, p.sc.connPool.Num())
- for i := 0; i < p.sc.connPool.Num(); i++ {
- c, err := p.sc.nextClient()
- if err != nil {
- // If we can't get a client, use the session's client.
- return nil
- }
- p.clientPool[i] = c
- }
- }
- p.multiplexSessionClientCounter = p.multiplexSessionClientCounter % len(p.clientPool)
- return p.clientPool[p.multiplexSessionClientCounter]
-}
-
-// errGetSessionTimeout returns error for context timeout during
-// sessionPool.take() or sessionPool.takeMultiplexed().
-func (p *sessionPool) errGetSessionTimeout(ctx context.Context) error {
- var code codes.Code
- if ctx.Err() == context.DeadlineExceeded {
- code = codes.DeadlineExceeded
- } else {
- code = codes.Canceled
- }
- if p.TrackSessionHandles {
- return p.errGetSessionTimeoutWithTrackedSessionHandles(code)
- }
- return p.errGetBasicSessionTimeout(code)
-}
-
-// errGetBasicSessionTimeout returns error for context timeout during
-// sessionPool.take() without any tracked sessionHandles.
-func (p *sessionPool) errGetBasicSessionTimeout(code codes.Code) error {
- return spannerErrorf(code, "timeout / context canceled during getting session.\n"+
- "Enable SessionPoolConfig.TrackSessionHandles if you suspect a session leak to get more information about the checked out sessions.")
-}
-
-// errGetSessionTimeoutWithTrackedSessionHandles returns error for context
-// timeout during sessionPool.take() including a stacktrace of each checked out
-// session handle.
-func (p *sessionPool) errGetSessionTimeoutWithTrackedSessionHandles(code codes.Code) error {
- err := spannerErrorf(code, "timeout / context canceled during getting session.")
- err.(*Error).additionalInformation = p.getTrackedSessionHandleStacksLocked()
- return err
-}
-
-// getTrackedSessionHandleStacksLocked returns a string containing the
-// stacktrace of all currently checked out sessions of the pool. This method
-// requires the caller to have locked p.mu.
-func (p *sessionPool) getTrackedSessionHandleStacksLocked() string {
- p.mu.Lock()
- defer p.mu.Unlock()
- stackTraces := ""
- i := 1
- element := p.trackedSessionHandles.Front()
- for element != nil {
- sh := element.Value.(*sessionHandle)
- sh.mu.Lock()
- if sh.stack != nil {
- stackTraces = fmt.Sprintf("%s\n\nSession %d checked out of pool at %s by goroutine:\n%s", stackTraces, i, sh.checkoutTime.Format(time.RFC3339), sh.stack)
- }
- sh.mu.Unlock()
- element = element.Next()
- i++
- }
- return stackTraces
-}
-
-func (p *sessionPool) isHealthy(s *session) bool {
- if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) {
- if err := s.ping(); isSessionNotFoundError(err) {
- // The session is already bad, continue to fetch/create a new one.
- s.destroy(false, false)
- return false
- }
- p.hc.scheduledHC(s)
- }
- return true
-}
-
-// take returns a cached session if there are available ones; if there isn't
-// any, it tries to allocate a new one.
-func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) {
- trace.TracePrintf(ctx, nil, "Acquiring a session")
- for {
- var s *session
-
- p.mu.Lock()
- if !p.valid {
- p.mu.Unlock()
- return nil, errInvalidSessionPool
- }
- if p.idleList.Len() > 0 {
- // Idle sessions are available, get one from the top of the idle
- // list.
- s = p.idleList.Remove(p.idleList.Front()).(*session)
- trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
- "Acquired session")
- p.decNumSessionsLocked(ctx)
- }
- if s != nil {
- s.setIdleList(nil)
- numCheckedOut := p.currSessionsCheckedOutLocked()
- p.mu.Unlock()
- p.mw.updateMaxSessionsCheckedOutDuringWindow(numCheckedOut)
- // From here, session is no longer in idle list, so healthcheck
- // workers won't destroy it. If healthcheck workers failed to
- // schedule healthcheck for the session timely, do the check here.
- // Because session check is still much cheaper than session
- // creation, they should be reused as much as possible.
- if !p.isHealthy(s) {
- continue
- }
- p.incNumInUse(ctx)
- return p.newSessionHandle(s), nil
- }
-
- // No session available. Start the creation of a new batch of sessions
- // if that is allowed, and then wait for a session to come available.
- if p.numWaiters >= p.createReqs {
- numSessions := minUint64(p.MaxOpened-p.numOpened, p.incStep)
- if err := p.growPoolLocked(numSessions, false); err != nil {
- p.mu.Unlock()
- return nil, err
- }
- }
-
- p.numWaiters++
- mayGetSession := p.mayGetSession
- p.mu.Unlock()
- trace.TracePrintf(ctx, nil, "Waiting for read-only session to become available")
- select {
- case <-ctx.Done():
- trace.TracePrintf(ctx, nil, "Context done waiting for session")
- p.recordStat(ctx, GetSessionTimeoutsCount, 1, tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- if p.otConfig != nil {
- p.recordOTStat(ctx, p.otConfig.getSessionTimeoutsCount, 1, recordOTStatOption{attr: p.otConfig.attributeMapWithoutMultiplexed})
- }
- p.mu.Lock()
- p.numWaiters--
- p.mu.Unlock()
- return nil, p.errGetSessionTimeout(ctx)
- case <-mayGetSession:
- p.mu.Lock()
- p.numWaiters--
- if p.sessionCreationError != nil {
- trace.TracePrintf(ctx, nil, "Error creating session: %v", p.sessionCreationError)
- err := p.sessionCreationError
- p.mu.Unlock()
- return nil, err
- }
- p.mu.Unlock()
- }
- }
-}
-
-// takeMultiplexed returns a cached session if there is available one; if there isn't
-// any, it tries to allocate a new one.
-func (p *sessionPool) takeMultiplexed(ctx context.Context) (*sessionHandle, error) {
- trace.TracePrintf(ctx, nil, "Acquiring a multiplexed session")
- for {
- var s *session
- p.mu.Lock()
- if !p.valid {
- p.mu.Unlock()
- return nil, errInvalidSessionPool
- }
- if !p.enableMultiplexSession {
- p.mu.Unlock()
- return p.take(ctx)
- }
- // use the multiplex session if it is available
- if p.multiplexedSession != nil {
- // Multiplexed session is available, get it.
- s = p.multiplexedSession
- trace.TracePrintf(ctx, map[string]interface{}{"sessionID": s.getID()},
- "Acquired multiplexed session")
- p.mu.Unlock()
- p.incNumMultiplexedInUse(ctx)
- return p.newSessionHandle(s), nil
- }
- mayGetSession := p.mayGetMultiplexedSession
- p.mu.Unlock()
- p.multiplexedSessionReq <- muxSessionCreateRequest{force: false, ctx: ctx}
- select {
- case <-ctx.Done():
- trace.TracePrintf(ctx, nil, "Context done waiting for multiplexed session")
- p.recordStat(ctx, GetSessionTimeoutsCount, 1, tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
- if p.otConfig != nil {
- p.recordOTStat(ctx, p.otConfig.getSessionTimeoutsCount, 1, recordOTStatOption{attr: p.otConfig.attributeMapWithMultiplexed})
- }
- return nil, p.errGetSessionTimeout(ctx)
- case <-mayGetSession: // Block until multiplexed session is created.
- p.mu.Lock()
- if p.multiplexedSessionCreationError != nil {
- trace.TracePrintf(ctx, nil, "Error creating multiplexed session: %v", p.multiplexedSessionCreationError)
- err := p.multiplexedSessionCreationError
- if isUnimplementedError(err) {
- logf(p.sc.logger, "Multiplexed session is not enabled on this project, continuing with regular sessions")
- p.enableMultiplexSession = false
- } else {
- p.mu.Unlock()
- // If the error is a timeout, there is a chance that the session was
- // created on the server but is not known to the session pool. In this
- // case, we should retry to get the session.
- return nil, err
- }
- }
- p.mu.Unlock()
- }
- }
-}
-
-// recycle puts session s back to the session pool's idle list, it returns true
-// if the session pool successfully recycles session s.
-func (p *sessionPool) recycle(s *session) bool {
- p.mu.Lock()
- defer p.mu.Unlock()
- return p.recycleLocked(s)
-}
-
-func (p *sessionPool) recycleLocked(s *session) bool {
- if !s.isValid() || !p.valid {
- // Reject the session if session is invalid or pool itself is invalid.
- return false
- }
- ctx := context.Background()
- // Put session at the top of the list to be handed out in LIFO order for load balancing
- // across channels.
- s.setIdleList(p.idleList.PushFront(s))
- p.incNumSessionsLocked(ctx)
- // Broadcast that a session has been returned to idle list.
- close(p.mayGetSession)
- p.mayGetSession = make(chan struct{})
- return true
-}
-
-// remove atomically removes session s from the session pool and invalidates s.
-// If isExpire == true, the removal is triggered by session expiration and in
-// such cases, only idle sessions can be removed.
-func (p *sessionPool) remove(s *session, isExpire bool, wasInUse bool) bool {
- if s.isMultiplexed {
- return false
- }
- p.mu.Lock()
- defer p.mu.Unlock()
- if isExpire && (p.numOpened <= p.MinOpened || s.getIdleList() == nil) {
- // Don't expire session if the session is not in idle list (in use), or
- // if number of open sessions is going below p.MinOpened.
- return false
- }
-
- ol := s.setIdleList(nil)
- ctx := context.Background()
- // If the session is in the idlelist, remove it.
- if ol != nil {
- // Remove from the list it is in.
- p.idleList.Remove(ol)
- p.decNumSessionsLocked(ctx)
- }
- if s.invalidate() {
- // Decrease the number of opened sessions.
- p.numOpened--
- // Decrease the number of sessions in use, only when not from idle list.
- if wasInUse {
- p.decNumInUseLocked(ctx)
- }
- p.recordStat(ctx, OpenSessionCount, int64(p.numOpened))
- close(p.mayGetSession)
- p.mayGetSession = make(chan struct{})
- return true
- }
- return false
-}
-
-func (p *sessionPool) currSessionsCheckedOutLocked() uint64 {
- return p.numOpened - uint64(p.idleList.Len())
-}
-
-func (p *sessionPool) incNumInUse(ctx context.Context) {
- p.mu.Lock()
- p.incNumInUseLocked(ctx)
- p.mu.Unlock()
-}
-
-func (p *sessionPool) incNumInUseLocked(ctx context.Context) {
- p.numInUse++
- p.recordStat(ctx, SessionsCount, int64(p.numInUse), tagNumInUseSessions, tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- p.recordStat(ctx, AcquiredSessionsCount, 1, tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- if p.otConfig != nil {
- p.recordOTStat(ctx, p.otConfig.acquiredSessionsCount, 1, recordOTStatOption{attr: p.otConfig.attributeMapWithoutMultiplexed})
- }
- if p.numInUse > p.maxNumInUse {
- p.maxNumInUse = p.numInUse
- p.recordStat(ctx, MaxInUseSessionsCount, int64(p.maxNumInUse), tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- }
-}
-
-func (p *sessionPool) incNumMultiplexedInUse(ctx context.Context) {
- p.recordStat(ctx, AcquiredSessionsCount, 1, tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
- if p.otConfig != nil {
- p.recordOTStat(ctx, p.otConfig.acquiredSessionsCount, 1, recordOTStatOption{attr: p.otConfig.attributeMapWithMultiplexed})
- }
-}
-
-func (p *sessionPool) decNumInUseLocked(ctx context.Context) {
- p.numInUse--
- if int64(p.numInUse) < 0 {
- // print whole call stack trace
- logf(p.sc.logger, "Number of sessions in use is negative, resetting it to currSessionsCheckedOutLocked. Stack trace: %s", string(debug.Stack()))
- p.numInUse = p.currSessionsCheckedOutLocked()
- }
- p.recordStat(ctx, SessionsCount, int64(p.numInUse), tagNumInUseSessions, tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- p.recordStat(ctx, ReleasedSessionsCount, 1, tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- if p.otConfig != nil {
- p.recordOTStat(ctx, p.otConfig.releasedSessionsCount, 1, recordOTStatOption{attr: p.otConfig.attributeMapWithoutMultiplexed})
- }
-}
-
-func (p *sessionPool) decNumMultiplexedInUseLocked(ctx context.Context) {
- p.recordStat(ctx, ReleasedSessionsCount, 1, tag.Tag{Key: tagKeyIsMultiplexed, Value: "true"})
- if p.otConfig != nil {
- p.recordOTStat(ctx, p.otConfig.releasedSessionsCount, 1, recordOTStatOption{attr: p.otConfig.attributeMapWithMultiplexed})
- }
-}
-
-func (p *sessionPool) incNumSessionsLocked(ctx context.Context) {
- p.numSessions++
- p.recordStat(ctx, SessionsCount, int64(p.numSessions), tagNumSessions)
-}
-
-func (p *sessionPool) decNumSessionsLocked(ctx context.Context) {
- p.numSessions--
- p.recordStat(ctx, SessionsCount, int64(p.numSessions), tagNumSessions)
-}
-
-// hcHeap implements heap.Interface. It is used to create the priority queue for
-// session healthchecks.
-type hcHeap struct {
- sessions []*session
-}
-
-// Len implements heap.Interface.Len.
-func (h hcHeap) Len() int {
- return len(h.sessions)
-}
-
-// Less implements heap.Interface.Less.
-func (h hcHeap) Less(i, j int) bool {
- return h.sessions[i].getNextCheck().Before(h.sessions[j].getNextCheck())
-}
-
-// Swap implements heap.Interface.Swap.
-func (h hcHeap) Swap(i, j int) {
- h.sessions[i], h.sessions[j] = h.sessions[j], h.sessions[i]
- h.sessions[i].setHcIndex(i)
- h.sessions[j].setHcIndex(j)
-}
-
-// Push implements heap.Interface.Push.
-func (h *hcHeap) Push(s interface{}) {
- ns := s.(*session)
- ns.setHcIndex(len(h.sessions))
- h.sessions = append(h.sessions, ns)
-}
-
-// Pop implements heap.Interface.Pop.
-func (h *hcHeap) Pop() interface{} {
- old := h.sessions
- n := len(old)
- s := old[n-1]
- h.sessions = old[:n-1]
- s.setHcIndex(-1)
- return s
-}
-
-// maintenanceWindowSize specifies the number of health check cycles that
-// defines a maintenance window. The maintenance window keeps track of a
-// rolling set of numbers for the number of maximum checked out sessions during
-// the maintenance window. This is used by the maintainer to determine the
-// number of sessions to create or delete at the end of each health check
-// cycle.
-const maintenanceWindowSize = 10
-
-// maintenanceWindow contains the statistics that are gathered during a health
-// check maintenance window.
-type maintenanceWindow struct {
- mu sync.Mutex
- // maxSessionsCheckedOut contains the maximum number of sessions that was
- // checked out of the session pool during a health check cycle. This number
- // indicates the number of sessions that was actually needed by the pool to
- // serve the load during that cycle. The values are kept as a rolling set
- // containing the values for the past 10 cycles (minutes). The maintainer
- // uses these values to determine the number of sessions to keep at the end
- // of each cycle.
- maxSessionsCheckedOut [maintenanceWindowSize]uint64
-}
-
-// maxSessionsCheckedOutDuringWindow returns the maximum number of sessions
-// that has been checked out during the last maintenance window of 10 cycles
-// (minutes).
-func (mw *maintenanceWindow) maxSessionsCheckedOutDuringWindow() uint64 {
- mw.mu.Lock()
- defer mw.mu.Unlock()
- var max uint64
- for _, cycleMax := range mw.maxSessionsCheckedOut {
- max = maxUint64(max, cycleMax)
- }
- return max
-}
-
-// updateMaxSessionsCheckedOutDuringWindow updates the maximum number of
-// sessions that has been checked out of the pool during the current
-// cycle of the maintenance window. A maintenance window consists of 10
-// maintenance cycles. Each cycle keeps track of the max number of sessions in
-// use during that cycle. The rolling maintenance window of 10 cycles is used
-// to determine the number of sessions to keep at the end of a cycle by
-// calculating the max in use during the last 10 cycles.
-func (mw *maintenanceWindow) updateMaxSessionsCheckedOutDuringWindow(currNumSessionsCheckedOut uint64) {
- mw.mu.Lock()
- defer mw.mu.Unlock()
- mw.maxSessionsCheckedOut[0] = maxUint64(currNumSessionsCheckedOut, mw.maxSessionsCheckedOut[0])
-}
-
-// startNewCycle starts a new health check cycle with the specified number of
-// checked out sessions as its initial value.
-func (mw *maintenanceWindow) startNewCycle(currNumSessionsCheckedOut uint64) {
- mw.mu.Lock()
- defer mw.mu.Unlock()
- copy(mw.maxSessionsCheckedOut[1:], mw.maxSessionsCheckedOut[:9])
- mw.maxSessionsCheckedOut[0] = currNumSessionsCheckedOut
-}
-
-// newMaintenanceWindow creates a new maintenance window with all values for
-// maxSessionsCheckedOut set to maxOpened. This ensures that a complete
-// maintenance window must pass before the maintainer will start to delete any
-// sessions.
-func newMaintenanceWindow(maxOpened uint64) *maintenanceWindow {
- mw := &maintenanceWindow{}
- // Initialize the rolling window with max values to prevent the maintainer
- // from deleting sessions before a complete window of 10 cycles has
- // finished.
- for i := 0; i < maintenanceWindowSize; i++ {
- mw.maxSessionsCheckedOut[i] = maxOpened
- }
- return mw
-}
-
-// healthChecker performs periodical healthchecks on registered sessions.
-type healthChecker struct {
- // mu protects concurrent access to healthChecker.
- mu sync.Mutex
- // queue is the priority queue for session healthchecks. Sessions with lower
- // nextCheck rank higher in the queue.
- queue hcHeap
- // interval is the average interval between two healthchecks on a session.
- interval time.Duration
- // workers is the number of concurrent healthcheck workers.
- workers int
- // waitWorkers waits for all healthcheck workers to exit
- waitWorkers sync.WaitGroup
- // pool is the underlying session pool.
- pool *sessionPool
- // sampleInterval is the interval of sampling by the maintainer.
- sampleInterval time.Duration
- // multiplexSessionRefreshInterval is the interval of refreshing multiplexed session.
- multiplexSessionRefreshInterval time.Duration
- // ready is used to signal that maintainer can start running.
- ready chan struct{}
- // done is used to signal that health checker should be closed.
- done chan struct{}
- // once is used for closing channel done only once.
- once sync.Once
- maintainerCancel func()
-}
-
-// newHealthChecker initializes new instance of healthChecker.
-func newHealthChecker(interval, multiplexSessionRefreshInterval time.Duration, workers int, sampleInterval time.Duration, pool *sessionPool) *healthChecker {
- if workers <= 0 {
- workers = 1
- }
- hc := &healthChecker{
- interval: interval,
- multiplexSessionRefreshInterval: multiplexSessionRefreshInterval,
- workers: workers,
- pool: pool,
- sampleInterval: sampleInterval,
- ready: make(chan struct{}),
- done: make(chan struct{}),
- maintainerCancel: func() {},
- }
- hc.waitWorkers.Add(1)
- go hc.maintainer()
- for i := 1; i <= hc.workers; i++ {
- hc.waitWorkers.Add(1)
- go hc.worker(i)
- }
- if hc.pool.enableMultiplexSession {
- go hc.multiplexSessionWorker()
- }
- return hc
-}
-
-// close closes the healthChecker and waits for all healthcheck workers to exit.
-func (hc *healthChecker) close() {
- hc.mu.Lock()
- hc.maintainerCancel()
- hc.mu.Unlock()
- hc.once.Do(func() { close(hc.done) })
- hc.waitWorkers.Wait()
-}
-
-// isClosing checks if a healthChecker is already closing.
-func (hc *healthChecker) isClosing() bool {
- select {
- case <-hc.done:
- return true
- default:
- return false
- }
-}
-
-// getInterval gets the healthcheck interval.
-func (hc *healthChecker) getInterval() time.Duration {
- hc.mu.Lock()
- defer hc.mu.Unlock()
- return hc.interval
-}
-
-// scheduledHCLocked schedules next healthcheck on session s with the assumption
-// that hc.mu is being held.
-func (hc *healthChecker) scheduledHCLocked(s *session) {
- var constPart, randPart float64
- if !s.firstHCDone {
- // The first check will be scheduled in a large range to make requests
- // more evenly distributed. The first healthcheck will be scheduled
- // after [interval*0.2, interval*1.1) ns.
- constPart = float64(hc.interval) * 0.2
- randPart = hc.pool.rand.Float64() * float64(hc.interval) * 0.9
- s.firstHCDone = true
- } else {
- // The next healthcheck will be scheduled after
- // [interval*0.9, interval*1.1) ns.
- constPart = float64(hc.interval) * 0.9
- randPart = hc.pool.rand.Float64() * float64(hc.interval) * 0.2
- }
- // math.Ceil makes the value to be at least 1 ns.
- nsFromNow := int64(math.Ceil(constPart + randPart))
- s.setNextCheck(time.Now().Add(time.Duration(nsFromNow)))
- if hi := s.getHcIndex(); hi != -1 {
- // Session is still being tracked by healthcheck workers.
- heap.Fix(&hc.queue, hi)
- }
-}
-
-// scheduledHC schedules next healthcheck on session s. It is safe to be called
-// concurrently.
-func (hc *healthChecker) scheduledHC(s *session) {
- hc.mu.Lock()
- defer hc.mu.Unlock()
- hc.scheduledHCLocked(s)
-}
-
-// register registers a session with healthChecker for periodical healthcheck.
-func (hc *healthChecker) register(s *session) {
- hc.mu.Lock()
- defer hc.mu.Unlock()
- hc.scheduledHCLocked(s)
- heap.Push(&hc.queue, s)
-}
-
-// unregister unregisters a session from healthcheck queue.
-func (hc *healthChecker) unregister(s *session) {
- hc.mu.Lock()
- defer hc.mu.Unlock()
- oi := s.setHcIndex(-1)
- if oi >= 0 {
- heap.Remove(&hc.queue, oi)
- }
-}
-
-// markDone marks that health check for session has been performed.
-func (hc *healthChecker) markDone(s *session) {
- hc.mu.Lock()
- defer hc.mu.Unlock()
- s.checkingHealth = false
-}
-
-// healthCheck checks the health of the session and pings it if needed.
-func (hc *healthChecker) healthCheck(s *session) {
- defer hc.markDone(s)
- if s.isMultiplexed {
- return
- }
- if !s.pool.isValid() {
- // Session pool is closed, perform a garbage collection.
- s.destroy(false, false)
- return
- }
- if err := s.ping(); isSessionNotFoundError(err) {
- // Ping failed, destroy the session.
- s.destroy(false, false)
- }
-}
-
-// worker performs the healthcheck on sessions in healthChecker's priority
-// queue.
-func (hc *healthChecker) worker(i int) {
- // Returns a session which we should ping to keep it alive.
- getNextForPing := func() *session {
- hc.pool.mu.Lock()
- defer hc.pool.mu.Unlock()
- hc.mu.Lock()
- defer hc.mu.Unlock()
- if hc.queue.Len() <= 0 {
- // Queue is empty.
- return nil
- }
- s := hc.queue.sessions[0]
- if s.getNextCheck().After(time.Now()) && hc.pool.valid {
- // All sessions have been checked recently.
- return nil
- }
- hc.scheduledHCLocked(s)
- if !s.checkingHealth {
- s.checkingHealth = true
- return s
- }
- return nil
- }
-
- for {
- if hc.isClosing() {
- // Exit when the pool has been closed and all sessions have been
- // destroyed or when health checker has been closed.
- hc.waitWorkers.Done()
- return
- }
- rs := getNextForPing()
- if rs == nil {
- // No work to be done so sleep to avoid burning CPU.
- pause := int64(100 * time.Millisecond)
- if pause > int64(hc.interval) {
- pause = int64(hc.interval)
- }
- select {
- case <-time.After(time.Duration(rand.Int63n(pause) + pause/2)):
- case <-hc.done:
- }
- continue
- }
- hc.healthCheck(rs)
- }
-}
-
-// maintainer maintains the number of sessions in the pool based on the session
-// pool configuration and the current and historical number of sessions checked
-// out of the pool. The maintainer will:
-// 1. Ensure that the session pool contains at least MinOpened sessions.
-// 2. If the current number of sessions in the pool exceeds the greatest number
-// of checked out sessions (=sessions in use) during the last 10 minutes,
-// and the delta is larger than MaxIdleSessions, the maintainer will reduce
-// the number of sessions to maxSessionsInUseDuringWindow+MaxIdleSessions.
-func (hc *healthChecker) maintainer() {
- // Wait until the pool is ready.
- <-hc.ready
-
- for iteration := uint64(0); ; iteration++ {
- if hc.isClosing() {
- hc.waitWorkers.Done()
- return
- }
-
- hc.pool.mu.Lock()
- currSessionsOpened := hc.pool.numOpened
- maxIdle := hc.pool.MaxIdle
- minOpened := hc.pool.MinOpened
-
- // Reset the start time for recording the maximum number of sessions
- // in the pool.
- now := time.Now()
- if now.After(hc.pool.lastResetTime.Add(10 * time.Minute)) {
- hc.pool.maxNumInUse = hc.pool.numInUse
- hc.pool.recordStat(context.Background(), MaxInUseSessionsCount, int64(hc.pool.maxNumInUse), tag.Tag{Key: tagKeyIsMultiplexed, Value: "false"})
- hc.pool.lastResetTime = now
- }
- hc.pool.mu.Unlock()
-
- // task to remove or log sessions which are unexpectedly long-running
- if now.After(hc.pool.InactiveTransactionRemovalOptions.lastExecutionTime.Add(hc.pool.executionFrequency)) {
- if hc.pool.ActionOnInactiveTransaction == Warn || hc.pool.ActionOnInactiveTransaction == WarnAndClose || hc.pool.ActionOnInactiveTransaction == Close {
- hc.pool.removeLongRunningSessions()
- }
- hc.pool.InactiveTransactionRemovalOptions.lastExecutionTime = now
- }
-
- // Get the maximum number of sessions in use during the current
- // maintenance window.
- maxSessionsInUseDuringWindow := hc.pool.mw.maxSessionsCheckedOutDuringWindow()
- hc.mu.Lock()
- ctx, cancel := context.WithTimeout(context.Background(), hc.sampleInterval)
- hc.maintainerCancel = cancel
- hc.mu.Unlock()
-
- // Grow or shrink pool if needed.
- // The number of sessions in the pool should be in the range
- // [Config.MinOpened, Config.MaxIdle+maxSessionsInUseDuringWindow]
- if currSessionsOpened < minOpened {
- if err := hc.growPoolInBatch(ctx, minOpened); err != nil {
- logf(hc.pool.sc.logger, "failed to grow pool: %v", err)
- }
- } else if maxIdle+maxSessionsInUseDuringWindow < currSessionsOpened {
- hc.shrinkPool(ctx, maxIdle+maxSessionsInUseDuringWindow)
- }
-
- select {
- case <-ctx.Done():
- case <-hc.done:
- cancel()
- }
- // Cycle the maintenance window. This will remove the oldest cycle and
- // add a new cycle at the beginning of the maintenance window with the
- // currently checked out number of sessions as the max number of
- // sessions in use in this cycle. This value will be increased during
- // the next cycle if it increases.
- hc.pool.mu.Lock()
- currSessionsInUse := hc.pool.currSessionsCheckedOutLocked()
- hc.pool.mu.Unlock()
- hc.pool.mw.startNewCycle(currSessionsInUse)
- }
-}
-
-func (hc *healthChecker) growPoolInBatch(ctx context.Context, growToNumSessions uint64) error {
- hc.pool.mu.Lock()
- defer hc.pool.mu.Unlock()
- numSessions := growToNumSessions - hc.pool.numOpened
- return hc.pool.growPoolLocked(numSessions, false)
-}
-
-// shrinkPool scales down the session pool. The method will stop deleting
-// sessions when shrinkToNumSessions number of sessions in the pool has
-// been reached. The method will also stop deleting sessions if it detects that
-// another process has started creating sessions for the pool again, for
-// example through the take() method.
-func (hc *healthChecker) shrinkPool(ctx context.Context, shrinkToNumSessions uint64) {
- hc.pool.mu.Lock()
- maxSessionsToDelete := int(hc.pool.numOpened - shrinkToNumSessions)
- hc.pool.mu.Unlock()
- var deleted int
- var prevNumOpened uint64 = math.MaxUint64
- for {
- if ctx.Err() != nil {
- return
- }
-
- p := hc.pool
- p.mu.Lock()
- // Check if the number of open sessions has increased. If it has, we
- // should stop deleting sessions, as the load has increased and
- // additional sessions are needed.
- if p.numOpened >= prevNumOpened {
- p.mu.Unlock()
- break
- }
- prevNumOpened = p.numOpened
-
- // Check on both whether we have reached the number of open sessions as
- // well as the number of sessions to delete, in case sessions have been
- // deleted by other methods because they have expired or deemed
- // invalid.
- if shrinkToNumSessions >= p.numOpened || deleted >= maxSessionsToDelete {
- p.mu.Unlock()
- break
- }
-
- var s *session
- if p.idleList.Len() > 0 {
- s = p.idleList.Front().Value.(*session)
- }
- p.mu.Unlock()
- if s != nil {
- deleted++
- // destroy session as expire.
- s.destroy(true, false)
- } else {
- break
- }
- }
-}
-
-func (hc *healthChecker) multiplexSessionWorker() {
- for {
- if hc.isClosing() {
- return
- }
- hc.pool.mu.Lock()
- createTime := time.Now()
- s := hc.pool.multiplexedSession
- if s != nil {
- createTime = hc.pool.multiplexedSession.createTime
- }
- hc.pool.mu.Unlock()
- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- if createTime.Add(multiplexSessionRefreshInterval).Before(time.Now()) {
- // Multiplexed session is idle for more than 7 days, replace it.
- hc.pool.multiplexedSessionReq <- muxSessionCreateRequest{force: true, ctx: ctx}
- // wait for the new multiplexed session to be created.
- <-hc.pool.mayGetMultiplexedSession
- }
- // Sleep for a while to avoid burning CPU.
- select {
- case <-time.After(hc.multiplexSessionRefreshInterval):
- cancel()
- case <-hc.done:
- cancel()
- return
- }
- }
-}
-
-// maxUint64 returns the maximum of two uint64.
-func maxUint64(a, b uint64) uint64 {
- if a > b {
- return a
- }
- return b
-}
-
-// minUint64 returns the minimum of two uint64.
-func minUint64(a, b uint64) uint64 {
- if a > b {
- return b
- }
- return a
-}
-
-// sessionResourceType is the type name of Spanner sessions.
-const sessionResourceType = "type.googleapis.com/google.spanner.v1.Session"
-
-// isSessionNotFoundError returns true if the given error is a
-// `Session not found` error.
-func isSessionNotFoundError(err error) bool {
- if err == nil {
- return false
- }
- if ErrCode(err) == codes.NotFound {
- if rt, ok := extractResourceType(err); ok {
- return rt == sessionResourceType
- }
- }
- return strings.Contains(err.Error(), "Session not found")
-}
-
-// isUnimplementedError returns true if the gRPC error code is Unimplemented.
-func isUnimplementedError(err error) bool {
- if err == nil {
- return false
- }
- if ErrCode(err) == codes.Unimplemented {
- return true
- }
- return false
-}
-
-func isFailedInlineBeginTransaction(err error) bool {
- if err == nil {
- return false
- }
- return ErrCode(err) == codes.Internal && strings.Contains(err.Error(), errInlineBeginTransactionFailed().Error())
-}
-
-// isClientClosing returns true if the given error is a
-// `Connection is closing` error.
-func isClientClosing(err error) bool {
- if err == nil {
- return false
- }
- return ErrCode(err) == codes.Canceled && strings.Contains(err.Error(), "the client connection is closing")
-}
diff --git a/vendor/cloud.google.com/go/spanner/sessionclient.go b/vendor/cloud.google.com/go/spanner/sessionclient.go
deleted file mode 100644
index e0a56f9af..000000000
--- a/vendor/cloud.google.com/go/spanner/sessionclient.go
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
-Copyright 2019 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "context"
- "fmt"
- "log"
- "reflect"
- "strings"
- "sync"
- "time"
-
- "cloud.google.com/go/internal/trace"
- vkit "cloud.google.com/go/spanner/apiv1"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "cloud.google.com/go/spanner/internal"
- "github.com/googleapis/gax-go/v2"
- "go.opencensus.io/tag"
- "google.golang.org/api/option"
- gtransport "google.golang.org/api/transport/grpc"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
-)
-
-var cidGen = newClientIDGenerator()
-
-type clientIDGenerator struct {
- mu sync.Mutex
- ids map[string]int
-}
-
-func newClientIDGenerator() *clientIDGenerator {
- return &clientIDGenerator{ids: make(map[string]int)}
-}
-
-func (cg *clientIDGenerator) nextID(database string) string {
- cg.mu.Lock()
- defer cg.mu.Unlock()
- var id int
- if val, ok := cg.ids[database]; ok {
- id = val + 1
- } else {
- id = 1
- }
- cg.ids[database] = id
- return fmt.Sprintf("client-%d", id)
-}
-
-// sessionConsumer is passed to the batchCreateSessions method and will receive
-// the sessions that are created as they become available. A sessionConsumer
-// implementation must be safe for concurrent use.
-//
-// The interface is implemented by sessionPool and is used for testing the
-// sessionClient.
-type sessionConsumer interface {
- // sessionReady is called when a session has been created and is ready for
- // use.
- sessionReady(ctx context.Context, s *session)
-
- // sessionCreationFailed is called when the creation of a sub-batch of
- // sessions failed. The numSessions argument specifies the number of
- // sessions that could not be created as a result of this error. A
- // consumer may receive multiple errors per batch.
- sessionCreationFailed(ctx context.Context, err error, numSessions int32, isMultiplexed bool)
-}
-
-// sessionClient creates sessions for a database, either in batches or one at a
-// time. Each session will be affiliated with a gRPC channel. sessionClient
-// will ensure that the sessions that are created are evenly distributed over
-// all available channels.
-type sessionClient struct {
- mu sync.Mutex
- closed bool
- disableRouteToLeader bool
-
- connPool gtransport.ConnPool
- database string
- id string
- userAgent string
- sessionLabels map[string]string
- databaseRole string
- md metadata.MD
- batchTimeout time.Duration
- logger *log.Logger
- callOptions *vkit.CallOptions
- otConfig *openTelemetryConfig
-}
-
-// newSessionClient creates a session client to use for a database.
-func newSessionClient(connPool gtransport.ConnPool, database, userAgent string, sessionLabels map[string]string, databaseRole string, disableRouteToLeader bool, md metadata.MD, batchTimeout time.Duration, logger *log.Logger, callOptions *vkit.CallOptions) *sessionClient {
- return &sessionClient{
- connPool: connPool,
- database: database,
- userAgent: userAgent,
- id: cidGen.nextID(database),
- sessionLabels: sessionLabels,
- databaseRole: databaseRole,
- disableRouteToLeader: disableRouteToLeader,
- md: md,
- batchTimeout: batchTimeout,
- logger: logger,
- callOptions: callOptions,
- }
-}
-
-func (sc *sessionClient) close() error {
- sc.mu.Lock()
- defer sc.mu.Unlock()
- sc.closed = true
- return sc.connPool.Close()
-}
-
-// createSession creates one session for the database of the sessionClient. The
-// session is created using one synchronous RPC.
-func (sc *sessionClient) createSession(ctx context.Context) (*session, error) {
- sc.mu.Lock()
- if sc.closed {
- sc.mu.Unlock()
- return nil, spannerErrorf(codes.FailedPrecondition, "SessionClient is closed")
- }
- sc.mu.Unlock()
- client, err := sc.nextClient()
- if err != nil {
- return nil, err
- }
-
- var md metadata.MD
- sid, err := client.CreateSession(contextWithOutgoingMetadata(ctx, sc.md, sc.disableRouteToLeader), &sppb.CreateSessionRequest{
- Database: sc.database,
- Session: &sppb.Session{Labels: sc.sessionLabels, CreatorRole: sc.databaseRole},
- }, gax.WithGRPCOptions(grpc.Header(&md)))
-
- if getGFELatencyMetricsFlag() && md != nil {
- _, instance, database, err := parseDatabaseName(sc.database)
- if err != nil {
- return nil, ToSpannerError(err)
- }
- ctxGFE, err := tag.New(ctx,
- tag.Upsert(tagKeyClientID, sc.id),
- tag.Upsert(tagKeyDatabase, database),
- tag.Upsert(tagKeyInstance, instance),
- tag.Upsert(tagKeyLibVersion, internal.Version),
- )
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", ToSpannerError(err))
- }
- err = captureGFELatencyStats(ctxGFE, md, "createSession")
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", ToSpannerError(err))
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "createSession", sc.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- if err != nil {
- return nil, ToSpannerError(err)
- }
- return &session{valid: true, client: client, id: sid.Name, createTime: time.Now(), md: sc.md, logger: sc.logger}, nil
-}
-
-// batchCreateSessions creates a batch of sessions for the database of the
-// sessionClient and returns these to the given sessionConsumer.
-//
-// createSessionCount is the number of sessions that should be created. The
-// sessionConsumer is guaranteed to receive the requested number of sessions if
-// no error occurs. If one or more errors occur, the sessionConsumer will
-// receive any number of sessions + any number of errors, where each error will
-// include the number of sessions that could not be created as a result of the
-// error. The sum of returned sessions and errored sessions will be equal to
-// the number of requested sessions.
-// If distributeOverChannels is true, the sessions will be equally distributed
-// over all the channels that are in use by the client.
-func (sc *sessionClient) batchCreateSessions(createSessionCount int32, distributeOverChannels bool, consumer sessionConsumer) error {
- var sessionCountPerChannel int32
- var remainder int32
- if distributeOverChannels {
- // The sessions that we create should be evenly distributed over all the
- // channels (gapic clients) that are used by the client. Each gapic client
- // will do a request for a fraction of the total.
- sessionCountPerChannel = createSessionCount / int32(sc.connPool.Num())
- // The remainder of the calculation will be added to the number of sessions
- // that will be created for the first channel, to ensure that we create the
- // exact number of requested sessions.
- remainder = createSessionCount % int32(sc.connPool.Num())
- } else {
- sessionCountPerChannel = createSessionCount
- }
- sc.mu.Lock()
- defer sc.mu.Unlock()
- if sc.closed {
- return spannerErrorf(codes.FailedPrecondition, "SessionClient is closed")
- }
- // Spread the session creation over all available gRPC channels. Spanner
- // will maintain server side caches for a session on the gRPC channel that
- // is used by the session. A session should therefore always use the same
- // channel, and the sessions should be as evenly distributed as possible
- // over the channels.
- var numBeingCreated int32
- for i := 0; i < sc.connPool.Num() && numBeingCreated < createSessionCount; i++ {
- client, err := sc.nextClient()
- if err != nil {
- return err
- }
- // Determine the number of sessions that should be created for this
- // channel. The createCount for the first channel will be increased
- // with the remainder of the division of the total number of sessions
- // with the number of channels. All other channels will just use the
- // result of the division over all channels.
- createCountForChannel := sessionCountPerChannel
- if i == 0 {
- // We add the remainder to the first gRPC channel we use. We could
- // also spread the remainder over all channels, but this ensures
- // that small batches of sessions (i.e. less than numChannels) are
- // created in one RPC.
- createCountForChannel += remainder
- }
- if createCountForChannel > 0 {
- go sc.executeBatchCreateSessions(client, createCountForChannel, sc.sessionLabels, sc.md, consumer)
- numBeingCreated += createCountForChannel
- }
- }
- return nil
-}
-
-// executeBatchCreateSessions executes the gRPC call for creating a batch of
-// sessions.
-func (sc *sessionClient) executeBatchCreateSessions(client *vkit.Client, createCount int32, labels map[string]string, md metadata.MD, consumer sessionConsumer) {
- ctx, cancel := context.WithTimeout(context.Background(), sc.batchTimeout)
- defer cancel()
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.BatchCreateSessions")
- defer func() { trace.EndSpan(ctx, nil) }()
- trace.TracePrintf(ctx, nil, "Creating a batch of %d sessions", createCount)
- remainingCreateCount := createCount
- for {
- sc.mu.Lock()
- closed := sc.closed
- sc.mu.Unlock()
- if closed {
- err := spannerErrorf(codes.Canceled, "Session client closed")
- trace.TracePrintf(ctx, nil, "Session client closed while creating a batch of %d sessions: %v", createCount, err)
- consumer.sessionCreationFailed(ctx, err, remainingCreateCount, false)
- break
- }
- if ctx.Err() != nil {
- trace.TracePrintf(ctx, nil, "Context error while creating a batch of %d sessions: %v", createCount, ctx.Err())
- consumer.sessionCreationFailed(ctx, ToSpannerError(ctx.Err()), remainingCreateCount, false)
- break
- }
- var mdForGFELatency metadata.MD
- response, err := client.BatchCreateSessions(contextWithOutgoingMetadata(ctx, sc.md, sc.disableRouteToLeader), &sppb.BatchCreateSessionsRequest{
- SessionCount: remainingCreateCount,
- Database: sc.database,
- SessionTemplate: &sppb.Session{Labels: labels, CreatorRole: sc.databaseRole},
- }, gax.WithGRPCOptions(grpc.Header(&mdForGFELatency)))
-
- if getGFELatencyMetricsFlag() && mdForGFELatency != nil {
- _, instance, database, err := parseDatabaseName(sc.database)
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error getting instance and database name: %v", err)
- }
- // Errors should not prevent initializing the session pool.
- ctxGFE, err := tag.New(ctx,
- tag.Upsert(tagKeyClientID, sc.id),
- tag.Upsert(tagKeyDatabase, database),
- tag.Upsert(tagKeyInstance, instance),
- tag.Upsert(tagKeyLibVersion, internal.Version),
- )
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in adding tags in BatchCreateSessions for GFE Latency: %v", err)
- }
- err = captureGFELatencyStats(ctxGFE, mdForGFELatency, "executeBatchCreateSessions")
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in Capturing GFE Latency and Header Missing count. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, mdForGFELatency, "executeBatchCreateSessions", sc.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error creating a batch of %d sessions: %v", remainingCreateCount, err)
- consumer.sessionCreationFailed(ctx, ToSpannerError(err), remainingCreateCount, false)
- break
- }
- actuallyCreated := int32(len(response.Session))
- trace.TracePrintf(ctx, nil, "Received a batch of %d sessions", actuallyCreated)
- for _, s := range response.Session {
- consumer.sessionReady(ctx, &session{valid: true, client: client, id: s.Name, createTime: time.Now(), md: md, logger: sc.logger})
- }
- if actuallyCreated < remainingCreateCount {
- // Spanner could return less sessions than requested. In that case, we
- // should do another call using the same gRPC channel.
- remainingCreateCount -= actuallyCreated
- } else {
- trace.TracePrintf(ctx, nil, "Finished creating %d sessions", createCount)
- break
- }
- }
-}
-
-func (sc *sessionClient) executeCreateMultiplexedSession(ctx context.Context, client *vkit.Client, md metadata.MD, consumer sessionConsumer) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.CreateSession")
- defer func() { trace.EndSpan(ctx, nil) }()
- trace.TracePrintf(ctx, nil, "Creating a multiplexed session")
- sc.mu.Lock()
- closed := sc.closed
- sc.mu.Unlock()
- if closed {
- err := spannerErrorf(codes.Canceled, "Session client closed")
- trace.TracePrintf(ctx, nil, "Session client closed while creating a multiplexed session: %v", err)
- return
- }
- if ctx.Err() != nil {
- trace.TracePrintf(ctx, nil, "Context error while creating a multiplexed session: %v", ctx.Err())
- consumer.sessionCreationFailed(ctx, ToSpannerError(ctx.Err()), 1, true)
- return
- }
- var mdForGFELatency metadata.MD
- response, err := client.CreateSession(contextWithOutgoingMetadata(ctx, sc.md, sc.disableRouteToLeader), &sppb.CreateSessionRequest{
- Database: sc.database,
- // Multiplexed sessions do not support labels.
- Session: &sppb.Session{CreatorRole: sc.databaseRole, Multiplexed: true},
- }, gax.WithGRPCOptions(grpc.Header(&mdForGFELatency)))
-
- if getGFELatencyMetricsFlag() && mdForGFELatency != nil {
- _, instance, database, err := parseDatabaseName(sc.database)
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error getting instance and database name: %v", err)
- }
- // Errors should not prevent initializing the session pool.
- ctxGFE, err := tag.New(ctx,
- tag.Upsert(tagKeyClientID, sc.id),
- tag.Upsert(tagKeyDatabase, database),
- tag.Upsert(tagKeyInstance, instance),
- tag.Upsert(tagKeyLibVersion, internal.Version),
- )
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in adding tags in CreateSession for GFE Latency: %v", err)
- }
- err = captureGFELatencyStats(ctxGFE, mdForGFELatency, "executeCreateSession")
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error in Capturing GFE Latency and Header Missing count. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, mdForGFELatency, "executeCreateSession", sc.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- if err != nil {
- trace.TracePrintf(ctx, nil, "Error creating a multiplexed sessions: %v", err)
- consumer.sessionCreationFailed(ctx, ToSpannerError(err), 1, true)
- return
- }
- consumer.sessionReady(ctx, &session{valid: true, client: client, id: response.Name, createTime: time.Now(), md: md, logger: sc.logger, isMultiplexed: response.Multiplexed})
- trace.TracePrintf(ctx, nil, "Finished creating multiplexed sessions")
-}
-
-func (sc *sessionClient) sessionWithID(id string) (*session, error) {
- sc.mu.Lock()
- defer sc.mu.Unlock()
- client, err := sc.nextClient()
- if err != nil {
- return nil, err
- }
- return &session{valid: true, client: client, id: id, createTime: time.Now(), md: sc.md, logger: sc.logger}, nil
-}
-
-// nextClient returns the next gRPC client to use for session creation. The
-// client is set on the session, and used by all subsequent gRPC calls on the
-// session. Using the same channel for all gRPC calls for a session ensures the
-// optimal usage of server side caches.
-func (sc *sessionClient) nextClient() (*vkit.Client, error) {
- var clientOpt option.ClientOption
- if _, ok := sc.connPool.(*gmeWrapper); ok {
- // Pass GCPMultiEndpoint as a pool.
- clientOpt = gtransport.WithConnPool(sc.connPool)
- } else {
- // Pick a grpc.ClientConn from a regular pool.
- clientOpt = option.WithGRPCConn(sc.connPool.Conn())
- }
- client, err := vkit.NewClient(context.Background(), clientOpt)
- if err != nil {
- return nil, err
- }
- clientInfo := []string{"gccl", internal.Version}
- if sc.userAgent != "" {
- agentWithVersion := strings.SplitN(sc.userAgent, "/", 2)
- if len(agentWithVersion) == 2 {
- clientInfo = append(clientInfo, agentWithVersion[0], agentWithVersion[1])
- }
- }
- client.SetGoogleClientInfo(clientInfo...)
- if sc.callOptions != nil {
- client.CallOptions = mergeCallOptions(client.CallOptions, sc.callOptions)
- }
- return client, nil
-}
-
-// mergeCallOptions merges two CallOptions into one and the first argument has
-// a lower order of precedence than the second one.
-func mergeCallOptions(a *vkit.CallOptions, b *vkit.CallOptions) *vkit.CallOptions {
- res := &vkit.CallOptions{}
- resVal := reflect.ValueOf(res).Elem()
- aVal := reflect.ValueOf(a).Elem()
- bVal := reflect.ValueOf(b).Elem()
-
- t := aVal.Type()
-
- for i := 0; i < aVal.NumField(); i++ {
- fieldName := t.Field(i).Name
-
- aFieldVal := aVal.Field(i).Interface().([]gax.CallOption)
- bFieldVal := bVal.Field(i).Interface().([]gax.CallOption)
-
- merged := append(aFieldVal, bFieldVal...)
- resVal.FieldByName(fieldName).Set(reflect.ValueOf(merged))
- }
- return res
-}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/fuzz.go b/vendor/cloud.google.com/go/spanner/spansql/fuzz.go
deleted file mode 100644
index 3621209f1..000000000
--- a/vendor/cloud.google.com/go/spanner/spansql/fuzz.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build gofuzz
-// +build gofuzz
-
-package spansql
-
-func FuzzParseQuery(data []byte) int {
- if _, err := ParseQuery(string(data)); err != nil {
- // The value 0 signals data is an invalid query that should be
- // added to the corpus.
- return 0
- }
- // The value 1 signals the input was lexically corrent and the
- // fuzzer should increase the priority of the given input.
- return 1
-}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/keywords.go b/vendor/cloud.google.com/go/spanner/spansql/keywords.go
deleted file mode 100644
index 6515a41e5..000000000
--- a/vendor/cloud.google.com/go/spanner/spansql/keywords.go
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
-Copyright 2020 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spansql
-
-import (
- "strings"
-)
-
-// IsKeyword reports whether the identifier is a reserved keyword.
-func IsKeyword(id string) bool {
- return keywords[strings.ToUpper(id)]
-}
-
-// keywords is the set of reserved keywords.
-// https://cloud.google.com/spanner/docs/lexical#reserved-keywords
-var keywords = map[string]bool{
- "ALL": true,
- "AND": true,
- "ANY": true,
- "ARRAY": true,
- "AS": true,
- "ASC": true,
- "ASSERT_ROWS_MODIFIED": true,
- "AT": true,
- "BETWEEN": true,
- "BY": true,
- "CASE": true,
- "CAST": true,
- "COLLATE": true,
- "CONTAINS": true,
- "CREATE": true,
- "CROSS": true,
- "CUBE": true,
- "CURRENT": true,
- "DEFAULT": true,
- "DEFINE": true,
- "DESC": true,
- "DISTINCT": true,
- "ELSE": true,
- "END": true,
- "ENUM": true,
- "ESCAPE": true,
- "EXCEPT": true,
- "EXCLUDE": true,
- "EXISTS": true,
- "EXTRACT": true,
- "FALSE": true,
- "FETCH": true,
- "FOLLOWING": true,
- "FOR": true,
- "FROM": true,
- "FULL": true,
- "GROUP": true,
- "GROUPING": true,
- "GROUPS": true,
- "HASH": true,
- "HAVING": true,
- "IF": true,
- "IGNORE": true,
- "IN": true,
- "INNER": true,
- "INTERSECT": true,
- "INTERVAL": true,
- "INTO": true,
- "IS": true,
- "JOIN": true,
- "LATERAL": true,
- "LEFT": true,
- "LIKE": true,
- "LIMIT": true,
- "LOOKUP": true,
- "MERGE": true,
- "NATURAL": true,
- "NEW": true,
- "NO": true,
- "NOT": true,
- "NULL": true,
- "NULLS": true,
- "OF": true,
- "ON": true,
- "OR": true,
- "ORDER": true,
- "OUTER": true,
- "OVER": true,
- "PARTITION": true,
- "PRECEDING": true,
- "PROTO": true,
- "RANGE": true,
- "RECURSIVE": true,
- "RESPECT": true,
- "RIGHT": true,
- "ROLLUP": true,
- "ROWS": true,
- "SELECT": true,
- "SET": true,
- "SOME": true,
- "STRUCT": true,
- "TABLESAMPLE": true,
- "THEN": true,
- "TO": true,
- "TREAT": true,
- "TRUE": true,
- "UNBOUNDED": true,
- "UNION": true,
- "UNNEST": true,
- "USING": true,
- "WHEN": true,
- "WHERE": true,
- "WINDOW": true,
- "WITH": true,
- "WITHIN": true,
-}
-
-// funcs is the set of reserved keywords that are functions.
-// https://cloud.google.com/spanner/docs/functions-and-operators
-var funcs = make(map[string]bool)
-var funcArgParsers = make(map[string]func(*parser) (Expr, *parseError))
-var aggregateFuncs = make(map[string]bool)
-
-func init() {
- for _, f := range funcNames {
- funcs[f] = true
- }
- for _, f := range aggregateFuncNames {
- funcs[f] = true
- aggregateFuncs[f] = true
- }
- // Special case for CAST, SAFE_CAST and EXTRACT
- funcArgParsers["CAST"] = typedArgParser
- funcArgParsers["SAFE_CAST"] = typedArgParser
- funcArgParsers["EXTRACT"] = extractArgParser
- // Spacial case of INTERVAL arg for DATE_ADD, DATE_SUB, GENERATE_DATE_ARRAY
- funcArgParsers["DATE_ADD"] = dateIntervalArgParser
- funcArgParsers["DATE_SUB"] = dateIntervalArgParser
- funcArgParsers["GENERATE_DATE_ARRAY"] = dateIntervalArgParser
- // Spacial case of INTERVAL arg for TIMESTAMP_ADD, TIMESTAMP_SUB
- funcArgParsers["TIMESTAMP_ADD"] = timestampIntervalArgParser
- funcArgParsers["TIMESTAMP_SUB"] = timestampIntervalArgParser
- // Special case of SEQUENCE arg for GET_NEXT_SEQUENCE_VALUE, GET_INTERNAL_SEQUENCE_STATE
- funcArgParsers["GET_NEXT_SEQUENCE_VALUE"] = sequenceArgParser
- funcArgParsers["GET_INTERNAL_SEQUENCE_STATE"] = sequenceArgParser
-}
-
-var funcNames = []string{
- // TODO: many more
-
- // Cast functions.
- "CAST",
- "SAFE_CAST",
-
- // Mathematical functions.
- "ABS",
- "ACOS",
- "ACOSH",
- "ASIN",
- "ASINH",
- "ATAN",
- "ATAN2",
- "ATANH",
- "CEIL",
- "CEILING",
- "COS",
- "COSH",
- "DIV",
- "EXP",
- "FLOOR",
- "GREATEST",
- "IEEE_DIVIDE",
- "IS_INF",
- "IS_NAN",
- "LEAST",
- "LN",
- "LOG",
- "LOG10",
- "MOD",
- "POW",
- "POWER",
- "ROUND",
- "SAFE_ADD",
- "SAFE_DIVIDE",
- "SAFE_MULTIPLY",
- "SAFE_NEGATE",
- "SAFE_SUBTRACT",
- "SIGN",
- "SIN",
- "SINH",
- "SQRT",
- "TAN",
- "TANH",
- "TRUNC",
-
- // Hash functions.
- "FARM_FINGERPRINT",
- "SHA1",
- "SHA256", "SHA512",
-
- // String functions.
- "BYTE_LENGTH", "CHAR_LENGTH", "CHARACTER_LENGTH",
- "CODE_POINTS_TO_BYTES", "CODE_POINTS_TO_STRING",
- "CONCAT",
- "ENDS_WITH",
- "FORMAT",
- "FROM_BASE32", "FROM_BASE64", "FROM_HEX",
- "LENGTH",
- "LOWER",
- "LPAD",
- "LTRIM",
- "REGEXP_CONTAINS", "REGEXP_EXTRACT", "REGEXP_EXTRACT_ALL", "REGEXP_REPLACE",
- "REPEAT",
- "REPLACE",
- "REVERSE",
- "RPAD",
- "RTRIM",
- "SAFE_CONVERT_BYTES_TO_STRING",
- "SPLIT",
- "STARTS_WITH",
- "STRPOS",
- "SUBSTR",
- "TO_BASE32", "TO_BASE64", "TO_CODE_POINTS", "TO_HEX",
- "TRIM",
- "UPPER",
-
- // Array functions.
- "ARRAY",
- "ARRAY_CONCAT",
- "ARRAY_FIRST", "ARRAY_INCLUDES", "ARRAY_INCLUDES_ALL", "ARRAY_INCLUDES_ANY", "ARRAY_LAST",
- "ARRAY_LENGTH",
- "ARRAY_MAX", "ARRAY_MIN", "ARRAY_REVERSE", "ARRAY_SLICE", "ARRAY_TRANSFORM",
- "ARRAY_TO_STRING",
- "GENERATE_ARRAY", "GENERATE_DATE_ARRAY",
- "OFFSET", "ORDINAL",
- "ARRAY_REVERSE",
- "ARRAY_IS_DISTINCT",
- "SAFE_OFFSET", "SAFE_ORDINAL",
-
- // Date functions.
- "CURRENT_DATE",
- "EXTRACT",
- "DATE",
- "DATE_ADD",
- "DATE_SUB",
- "DATE_DIFF",
- "DATE_TRUNC",
- "DATE_FROM_UNIX_DATE",
- "FORMAT_DATE",
- "PARSE_DATE",
- "UNIX_DATE",
-
- // Timestamp functions.
- "CURRENT_TIMESTAMP",
- "STRING",
- "TIMESTAMP",
- "TIMESTAMP_ADD",
- "TIMESTAMP_SUB",
- "TIMESTAMP_DIFF",
- "TIMESTAMP_TRUNC",
- "FORMAT_TIMESTAMP",
- "PARSE_TIMESTAMP",
- "TIMESTAMP_SECONDS",
- "TIMESTAMP_MILLIS",
- "TIMESTAMP_MICROS",
- "UNIX_SECONDS",
- "UNIX_MILLIS",
- "UNIX_MICROS",
- "PENDING_COMMIT_TIMESTAMP",
-
- // JSON functions.
- "JSON_QUERY",
- "JSON_VALUE",
- "JSON_QUERY_ARRAY",
- "JSON_VALUE_ARRAY",
-
- // Bit functions.
- "BIT_COUNT",
- "BIT_REVERSE",
-
- // Sequence functions.
- "GET_NEXT_SEQUENCE_VALUE",
- "GET_INTERNAL_SEQUENCE_STATE",
-
- // Utility functions.
- "GENERATE_UUID",
-}
-
-var aggregateFuncNames = []string{
- // Aggregate functions.
- "ANY_VALUE",
- "ARRAY_AGG",
- "ARRAY_CONCAT_AGG",
- "AVG",
- "BIT_AND",
- "BIT_OR",
- "BIT_XOR",
- "COUNT",
- "COUNTIF",
- "LOGICAL_AND",
- "LOGICAL_OR",
- "MAX",
- "MIN",
- "STRING_AGG",
- "SUM",
-
- // Statistical aggregate functions.
- "STDDEV",
- "STDDEV_SAMP",
- "VAR_SAMP",
- "VARIANCE",
-}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/parser.go b/vendor/cloud.google.com/go/spanner/spansql/parser.go
deleted file mode 100644
index 982b6d1d5..000000000
--- a/vendor/cloud.google.com/go/spanner/spansql/parser.go
+++ /dev/null
@@ -1,4696 +0,0 @@
-/*
-Copyright 2019 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package spansql contains types and a parser for the Cloud Spanner SQL dialect.
-
-To parse, use one of the Parse functions (ParseDDL, ParseDDLStmt, ParseQuery, etc.).
-
-Sources:
-
- https://cloud.google.com/spanner/docs/lexical
- https://cloud.google.com/spanner/docs/query-syntax
- https://cloud.google.com/spanner/docs/data-definition-language
-*/
-package spansql
-
-/*
-This file is structured as follows:
-
-- There are several exported ParseFoo functions that accept an input string
- and return a type defined in types.go. This is the principal API of this package.
- These functions are implemented as wrappers around the lower-level functions,
- with additional checks to ensure things such as input exhaustion.
-- The token and parser types are defined. These constitute the lexical token
- and parser machinery. parser.next is the main way that other functions get
- the next token, with parser.back providing a single token rewind, and
- parser.sniff, parser.eat and parser.expect providing lookahead helpers.
-- The parseFoo methods are defined, matching the SQL grammar. Each consumes its
- namesake production from the parser. There are also some fooParser helper vars
- defined that abbreviate the parsing of some of the regular productions.
-*/
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "cloud.google.com/go/civil"
-)
-
-const debug = false
-
-func debugf(format string, args ...interface{}) {
- if !debug {
- return
- }
- fmt.Fprintf(os.Stderr, "spansql debug: "+format+"\n", args...)
-}
-
-// ParseDDL parses a DDL file.
-//
-// The provided filename is used for error reporting and will
-// appear in the returned structure.
-func ParseDDL(filename, s string) (*DDL, error) {
- ddl := &DDL{}
- if err := parseStatements(ddl, filename, s); err != nil {
- return nil, err
- }
-
- return ddl, nil
-}
-
-// ParseDML parses a DML file.
-//
-// The provided filename is used for error reporting and will
-// appear in the returned structure.
-func ParseDML(filename, s string) (*DML, error) {
- dml := &DML{}
- if err := parseStatements(dml, filename, s); err != nil {
- return nil, err
- }
-
- return dml, nil
-}
-
-func parseStatements(stmts statements, filename string, s string) error {
- p := newParser(filename, s)
-
- stmts.setFilename(filename)
-
- for {
- p.skipSpace()
- if p.done {
- break
- }
-
- switch v := stmts.(type) {
- case *DDL:
- stmt, err := p.parseDDLStmt()
- if err != nil {
- return err
- }
- v.List = append(v.List, stmt)
- case *DML:
- stmt, err := p.parseDMLStmt()
- if err != nil {
- return err
- }
- v.List = append(v.List, stmt)
- }
-
- tok := p.next()
- if tok.err == eof {
- break
- } else if tok.err != nil {
- return tok.err
- }
- if tok.value == ";" {
- continue
- } else {
- return p.errorf("unexpected token %q", tok.value)
- }
- }
- if p.Rem() != "" {
- return fmt.Errorf("unexpected trailing contents %q", p.Rem())
- }
-
- // Handle comments.
- for _, com := range p.comments {
- c := &Comment{
- Marker: com.marker,
- Isolated: com.isolated,
- Start: com.start,
- End: com.end,
- Text: com.text,
- }
-
- // Strip common whitespace prefix and any whitespace suffix.
- // TODO: This is a bodgy implementation of Longest Common Prefix,
- // and also doesn't do tabs vs. spaces well.
- var prefix string
- for i, line := range c.Text {
- line = strings.TrimRight(line, " \b\t")
- c.Text[i] = line
- trim := len(line) - len(strings.TrimLeft(line, " \b\t"))
- if i == 0 {
- prefix = line[:trim]
- } else {
- // Check how much of prefix is in common.
- for !strings.HasPrefix(line, prefix) {
- prefix = prefix[:len(prefix)-1]
- }
- }
- if prefix == "" {
- break
- }
- }
- if prefix != "" {
- for i, line := range c.Text {
- c.Text[i] = strings.TrimPrefix(line, prefix)
- }
- }
-
- stmts.addComment(c)
- }
-
- return nil
-}
-
-// ParseDDLStmt parses a single DDL statement.
-func ParseDDLStmt(s string) (DDLStmt, error) {
- p := newParser("-", s)
- stmt, err := p.parseDDLStmt()
- if err != nil {
- return nil, err
- }
- if p.Rem() != "" {
- return nil, fmt.Errorf("unexpected trailing contents %q", p.Rem())
- }
- return stmt, nil
-}
-
-// ParseDMLStmt parses a single DML statement.
-func ParseDMLStmt(s string) (DMLStmt, error) {
- p := newParser("-", s)
- stmt, err := p.parseDMLStmt()
- if err != nil {
- return nil, err
- }
- if p.Rem() != "" {
- return nil, fmt.Errorf("unexpected trailing contents %q", p.Rem())
- }
- return stmt, nil
-}
-
-// ParseQuery parses a query string.
-func ParseQuery(s string) (Query, error) {
- p := newParser("-", s)
- q, err := p.parseQuery()
- if err != nil {
- return Query{}, err
- }
- if p.Rem() != "" {
- return Query{}, fmt.Errorf("unexpected trailing query contents %q", p.Rem())
- }
- return q, nil
-}
-
-type token struct {
- value string
- err *parseError
- line, offset int
-
- typ tokenType
- float64 float64
- string string // unquoted form for stringToken/bytesToken/quotedID
-
- // int64Token is parsed as a number only when it is known to be a literal.
- // This permits correct handling of operators preceding such a token,
- // which cannot be identified as part of the int64 until later.
- int64Base int
-}
-
-type tokenType int
-
-const (
- unknownToken tokenType = iota
- int64Token
- float64Token
- stringToken
- bytesToken
- unquotedID
- quotedID
-)
-
-func (t *token) String() string {
- if t.err != nil {
- return fmt.Sprintf("parse error: %v", t.err)
- }
- return strconv.Quote(t.value)
-}
-
-type parseError struct {
- message string
- filename string
- line int // 1-based line number
- offset int // 0-based byte offset from start of input
-}
-
-func (pe *parseError) Error() string {
- if pe == nil {
- return "<nil>"
- }
- if pe.line == 1 {
- return fmt.Sprintf("%s:1.%d: %v", pe.filename, pe.offset, pe.message)
- }
- return fmt.Sprintf("%s:%d: %v", pe.filename, pe.line, pe.message)
-}
-
-var eof = &parseError{message: "EOF"}
-
-type parser struct {
- s string // Remaining input.
- done bool // Whether the parsing is finished (success or error).
- backed bool // Whether back() was called.
- cur token
-
- filename string
- line, offset int // updated by places that shrink s
-
- comments []comment // accumulated during parse
-}
-
-type comment struct {
- marker string // "#" or "--" or "/*"
- isolated bool // if it starts on its own line
- start, end Position
- text []string
-}
-
-// Pos reports the position of the current token.
-func (p *parser) Pos() Position { return Position{Line: p.cur.line, Offset: p.cur.offset} }
-
-func newParser(filename, s string) *parser {
- return &parser{
- s: s,
-
- cur: token{line: 1},
-
- filename: filename,
- line: 1,
- }
-}
-
-// Rem returns the unparsed remainder, ignoring space.
-func (p *parser) Rem() string {
- rem := p.s
- if p.backed {
- rem = p.cur.value + rem
- }
- i := 0
- for ; i < len(rem); i++ {
- if !isSpace(rem[i]) {
- break
- }
- }
- return rem[i:]
-}
-
-func (p *parser) String() string {
- if p.backed {
- return fmt.Sprintf("next tok: %s (rem: %q)", &p.cur, p.s)
- }
- return fmt.Sprintf("rem: %q", p.s)
-}
-
-func (p *parser) errorf(format string, args ...interface{}) *parseError {
- pe := &parseError{
- message: fmt.Sprintf(format, args...),
- filename: p.filename,
- line: p.cur.line,
- offset: p.cur.offset,
- }
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-func isInitialIdentifierChar(c byte) bool {
- // https://cloud.google.com/spanner/docs/lexical#identifiers
- switch {
- case 'A' <= c && c <= 'Z':
- return true
- case 'a' <= c && c <= 'z':
- return true
- case c == '_':
- return true
- }
- return false
-}
-
-func isIdentifierChar(c byte) bool {
- // https://cloud.google.com/spanner/docs/lexical#identifiers
- // This doesn't apply the restriction that an identifier cannot start with [0-9],
- // nor does it check against reserved keywords.
- switch {
- case 'A' <= c && c <= 'Z':
- return true
- case 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- case c == '_':
- return true
- }
- return false
-}
-
-func isHexDigit(c byte) bool {
- return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F'
-}
-
-func isOctalDigit(c byte) bool {
- return '0' <= c && c <= '7'
-}
-
-func (p *parser) consumeNumber() {
- /*
- int64_value:
- { decimal_value | hex_value }
-
- decimal_value:
- [-]0—9+
-
- hex_value:
- [-]0[xX]{0—9|a—f|A—F}+
-
- (float64_value is not formally specified)
-
- float64_value :=
- [+-]DIGITS.[DIGITS][e[+-]DIGITS]
- | [DIGITS].DIGITS[e[+-]DIGITS]
- | DIGITSe[+-]DIGITS
- */
-
- i, neg, base := 0, false, 10
- float, e, dot := false, false, false
- if p.s[i] == '-' {
- neg = true
- i++
- } else if p.s[i] == '+' {
- // This isn't in the formal grammar, but is mentioned informally.
- // https://cloud.google.com/spanner/docs/lexical#integer-literals
- i++
- }
- if strings.HasPrefix(p.s[i:], "0x") || strings.HasPrefix(p.s[i:], "0X") {
- base = 16
- i += 2
- }
- d0 := i
-digitLoop:
- for i < len(p.s) {
- switch c := p.s[i]; {
- case '0' <= c && c <= '9':
- i++
- case base == 16 && 'A' <= c && c <= 'F':
- i++
- case base == 16 && 'a' <= c && c <= 'f':
- i++
- case base == 10 && (c == 'e' || c == 'E'):
- if e {
- p.errorf("bad token %q", p.s[:i])
- return
- }
- // Switch to consuming float.
- float, e = true, true
- i++
-
- if i < len(p.s) && (p.s[i] == '+' || p.s[i] == '-') {
- i++
- }
- case base == 10 && c == '.':
- if dot || e { // any dot must come before E
- p.errorf("bad token %q", p.s[:i])
- return
- }
- // Switch to consuming float.
- float, dot = true, true
- i++
- default:
- break digitLoop
- }
- }
- if d0 == i {
- p.errorf("no digits in numeric literal")
- return
- }
- sign := ""
- if neg {
- sign = "-"
- }
- p.cur.value, p.s = p.s[:i], p.s[i:]
- p.offset += i
- var err error
- if float {
- p.cur.typ = float64Token
- p.cur.float64, err = strconv.ParseFloat(sign+p.cur.value[d0:], 64)
- } else {
- p.cur.typ = int64Token
- p.cur.value = sign + p.cur.value[d0:]
- p.cur.int64Base = base
- // This is parsed on demand.
- }
- if err != nil {
- p.errorf("bad numeric literal %q: %v", p.cur.value, err)
- }
-}
-
-func (p *parser) consumeString() {
- // https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-
- delim := p.stringDelimiter()
- if p.cur.err != nil {
- return
- }
-
- p.cur.string, p.cur.err = p.consumeStringContent(delim, false, true, "string literal")
- p.cur.typ = stringToken
-}
-
-func (p *parser) consumeRawString() {
- // https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-
- p.s = p.s[1:] // consume 'R'
- delim := p.stringDelimiter()
- if p.cur.err != nil {
- return
- }
-
- p.cur.string, p.cur.err = p.consumeStringContent(delim, true, true, "raw string literal")
- p.cur.typ = stringToken
-}
-
-func (p *parser) consumeBytes() {
- // https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-
- p.s = p.s[1:] // consume 'B'
- delim := p.stringDelimiter()
- if p.cur.err != nil {
- return
- }
-
- p.cur.string, p.cur.err = p.consumeStringContent(delim, false, false, "bytes literal")
- p.cur.typ = bytesToken
-}
-
-func (p *parser) consumeRawBytes() {
- // https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-
- p.s = p.s[2:] // consume 'RB'
- delim := p.stringDelimiter()
- if p.cur.err != nil {
- return
- }
-
- p.cur.string, p.cur.err = p.consumeStringContent(delim, true, false, "raw bytes literal")
- p.cur.typ = bytesToken
-}
-
-// stringDelimiter returns the opening string delimiter.
-func (p *parser) stringDelimiter() string {
- c := p.s[0]
- if c != '"' && c != '\'' {
- p.errorf("invalid string literal")
- return ""
- }
- // Look for triple.
- if len(p.s) >= 3 && p.s[1] == c && p.s[2] == c {
- return p.s[:3]
- }
- return p.s[:1]
-}
-
-// consumeStringContent consumes a string-like literal, including its delimiters.
-//
-// - delim is the opening/closing delimiter.
-// - raw is true if consuming a raw string.
-// - unicode is true if unicode escape sequence (\uXXXX or \UXXXXXXXX) are permitted.
-// - name identifies the name of the consuming token.
-//
-// It is designed for consuming string, bytes literals, and also backquoted identifiers.
-func (p *parser) consumeStringContent(delim string, raw, unicode bool, name string) (string, *parseError) {
- // https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-
- if len(delim) == 3 {
- name = "triple-quoted " + name
- }
-
- i := len(delim)
- var content []byte
-
- for i < len(p.s) {
- if strings.HasPrefix(p.s[i:], delim) {
- i += len(delim)
- p.s = p.s[i:]
- p.offset += i
- return string(content), nil
- }
-
- if p.s[i] == '\\' {
- i++
- if i >= len(p.s) {
- return "", p.errorf("unclosed %s", name)
- }
-
- if raw {
- content = append(content, '\\', p.s[i])
- i++
- continue
- }
-
- switch p.s[i] {
- case 'a':
- i++
- content = append(content, '\a')
- case 'b':
- i++
- content = append(content, '\b')
- case 'f':
- i++
- content = append(content, '\f')
- case 'n':
- i++
- content = append(content, '\n')
- case 'r':
- i++
- content = append(content, '\r')
- case 't':
- i++
- content = append(content, '\t')
- case 'v':
- i++
- content = append(content, '\v')
- case '\\':
- i++
- content = append(content, '\\')
- case '?':
- i++
- content = append(content, '?')
- case '"':
- i++
- content = append(content, '"')
- case '\'':
- i++
- content = append(content, '\'')
- case '`':
- i++
- content = append(content, '`')
- case 'x', 'X':
- i++
- if !(i+1 < len(p.s) && isHexDigit(p.s[i]) && isHexDigit(p.s[i+1])) {
- return "", p.errorf("illegal escape sequence: hex escape sequence must be followed by 2 hex digits")
- }
- c, err := strconv.ParseUint(p.s[i:i+2], 16, 8)
- if err != nil {
- return "", p.errorf("illegal escape sequence: invalid hex digits: %q: %v", p.s[i:i+2], err)
- }
- content = append(content, byte(c))
- i += 2
- case 'u', 'U':
- t := p.s[i]
- if !unicode {
- return "", p.errorf("illegal escape sequence: \\%c", t)
- }
-
- i++
- size := 4
- if t == 'U' {
- size = 8
- }
- if i+size-1 >= len(p.s) {
- return "", p.errorf("illegal escape sequence: \\%c escape sequence must be followed by %d hex digits", t, size)
- }
- for j := 0; j < size; j++ {
- if !isHexDigit(p.s[i+j]) {
- return "", p.errorf("illegal escape sequence: \\%c escape sequence must be followed by %d hex digits", t, size)
- }
- }
- c, err := strconv.ParseUint(p.s[i:i+size], 16, 64)
- if err != nil {
- return "", p.errorf("illegal escape sequence: invalid \\%c digits: %q: %v", t, p.s[i:i+size], err)
- }
- if 0xD800 <= c && c <= 0xDFFF || 0x10FFFF < c {
- return "", p.errorf("illegal escape sequence: invalid codepoint: %x", c)
- }
- var buf [utf8.UTFMax]byte
- n := utf8.EncodeRune(buf[:], rune(c))
- content = append(content, buf[:n]...)
- i += size
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if !(i+2 < len(p.s) && isOctalDigit(p.s[i+1]) && isOctalDigit(p.s[i+2])) {
- return "", p.errorf("illegal escape sequence: octal escape sequence must be followed by 3 octal digits")
- }
- c, err := strconv.ParseUint(p.s[i:i+3], 8, 64)
- if err != nil {
- return "", p.errorf("illegal escape sequence: invalid octal digits: %q: %v", p.s[i:i+3], err)
- }
- if c >= 256 {
- return "", p.errorf("illegal escape sequence: octal digits overflow: %q (%d)", p.s[i:i+3], c)
- }
- content = append(content, byte(c))
- i += 3
- default:
- return "", p.errorf("illegal escape sequence: \\%c", p.s[i])
- }
-
- continue
- }
-
- if p.s[i] == '\n' {
- if len(delim) != 3 { // newline is only allowed inside triple-quoted.
- return "", p.errorf("newline forbidden in %s", name)
- }
- p.line++
- }
-
- content = append(content, p.s[i])
- i++
- }
-
- return "", p.errorf("unclosed %s", name)
-}
-
-var operators = map[string]bool{
- // Arithmetic operators.
- "-": true, // both unary and binary
- "~": true,
- "*": true,
- "/": true,
- "||": true,
- "+": true,
- "<<": true,
- ">>": true,
- "&": true,
- "^": true,
- "|": true,
-
- // Comparison operators.
- "<": true,
- "<=": true,
- ">": true,
- ">=": true,
- "=": true,
- "!=": true,
- "<>": true,
-}
-
-func isSpace(c byte) bool {
- // Per https://cloud.google.com/spanner/docs/lexical, informally,
- // whitespace is defined as "space, backspace, tab, newline".
- switch c {
- case ' ', '\b', '\t', '\n':
- return true
- }
- return false
-}
-
-// skipSpace skips past any space or comments.
-func (p *parser) skipSpace() bool {
- initLine := p.line
- // If we start capturing a comment in this method,
- // this is set to its comment value. Multi-line comments
- // are only joined during a single skipSpace invocation.
- var com *comment
-
- i := 0
- for i < len(p.s) {
- if isSpace(p.s[i]) {
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- continue
- }
- // Comments.
- marker, term := "", ""
- if p.s[i] == '#' {
- marker, term = "#", "\n"
- } else if i+1 < len(p.s) && p.s[i] == '-' && p.s[i+1] == '-' {
- marker, term = "--", "\n"
- } else if i+1 < len(p.s) && p.s[i] == '/' && p.s[i+1] == '*' {
- marker, term = "/*", "*/"
- }
- if term == "" {
- break
- }
- // Search for the terminator, starting after the marker.
- ti := strings.Index(p.s[i+len(marker):], term)
- if ti < 0 {
- p.errorf("unterminated comment")
- return false
- }
- ti += len(marker) // make ti relative to p.s[i:]
- if com != nil && (com.end.Line+1 < p.line || com.marker != marker) {
- // There's a previous comment, but there's an
- // intervening blank line, or the marker changed.
- // Terminate the previous comment.
- com = nil
- }
- if com == nil {
- // New comment.
- p.comments = append(p.comments, comment{
- marker: marker,
- isolated: (p.line != initLine) || p.line == 1,
- start: Position{
- Line: p.line,
- Offset: p.offset + i,
- },
- })
- com = &p.comments[len(p.comments)-1]
- }
- textLines := strings.Split(p.s[i+len(marker):i+ti], "\n")
- com.text = append(com.text, textLines...)
- com.end = Position{
- Line: p.line + len(textLines) - 1,
- Offset: p.offset + i + ti,
- }
- p.line = com.end.Line
- if term == "\n" {
- p.line++
- }
- i += ti + len(term)
-
- // A non-isolated comment is always complete and doesn't get
- // combined with any future comment.
- if !com.isolated {
- com = nil
- }
- }
- p.s = p.s[i:]
- p.offset += i
- if p.s == "" {
- p.done = true
- }
- return i > 0
-}
-
-// advance moves the parser to the next token, which will be available in p.cur.
-func (p *parser) advance() {
- prevID := p.cur.typ == quotedID || p.cur.typ == unquotedID
-
- p.skipSpace()
- if p.done {
- return
- }
-
- // If the previous token was an identifier (quoted or unquoted),
- // the next token being a dot means this is a path expression (not a number).
- if prevID && p.s[0] == '.' {
- p.cur.err = nil
- p.cur.line, p.cur.offset = p.line, p.offset
- p.cur.typ = unknownToken
- p.cur.value, p.s = p.s[:1], p.s[1:]
- p.offset++
- return
- }
-
- p.cur.err = nil
- p.cur.line, p.cur.offset = p.line, p.offset
- p.cur.typ = unknownToken
- // TODO: struct literals
- switch p.s[0] {
- case ',', ';', '(', ')', '{', '}', '[', ']', '*', '+', '-':
- // Single character symbol.
- p.cur.value, p.s = p.s[:1], p.s[1:]
- p.offset++
- return
- // String literal prefix.
- case 'B', 'b', 'R', 'r', '"', '\'':
- // "B", "b", "BR", "Rb" etc are valid string literal prefix, however "BB", "rR" etc are not.
- raw, bytes := false, false
- for i := 0; i < 4 && i < len(p.s); i++ {
- switch {
- case !raw && (p.s[i] == 'R' || p.s[i] == 'r'):
- raw = true
- continue
- case !bytes && (p.s[i] == 'B' || p.s[i] == 'b'):
- bytes = true
- continue
- case p.s[i] == '"' || p.s[i] == '\'':
- switch {
- case raw && bytes:
- p.consumeRawBytes()
- case raw:
- p.consumeRawString()
- case bytes:
- p.consumeBytes()
- default:
- p.consumeString()
- }
- return
- }
- break
- }
- case '`':
- // Quoted identifier.
- p.cur.string, p.cur.err = p.consumeStringContent("`", false, true, "quoted identifier")
- p.cur.typ = quotedID
- return
- }
- if p.s[0] == '@' || isInitialIdentifierChar(p.s[0]) {
- // Start consuming identifier.
- i := 1
- for i < len(p.s) && isIdentifierChar(p.s[i]) {
- i++
- }
- p.cur.value, p.s = p.s[:i], p.s[i:]
- p.cur.typ = unquotedID
- p.offset += i
- return
- }
- if len(p.s) >= 2 && p.s[0] == '.' && ('0' <= p.s[1] && p.s[1] <= '9') {
- // dot followed by a digit.
- p.consumeNumber()
- return
- }
- if '0' <= p.s[0] && p.s[0] <= '9' {
- p.consumeNumber()
- return
- }
-
- // Look for operator (two or one bytes).
- for i := 2; i >= 1; i-- {
- if i <= len(p.s) && operators[p.s[:i]] {
- p.cur.value, p.s = p.s[:i], p.s[i:]
- p.offset += i
- return
- }
- }
-
- p.errorf("unexpected byte %#x", p.s[0])
-}
-
-// back steps the parser back one token. It cannot be called twice in succession.
-func (p *parser) back() {
- if p.backed {
- panic("parser backed up twice")
- }
- p.done = false
- p.backed = true
- // If an error was being recovered, we wish to ignore the error.
- // Don't do that for eof since that'll be returned next.
- if p.cur.err != eof {
- p.cur.err = nil
- }
-}
-
-// next returns the next token.
-func (p *parser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done && p.cur.err == nil {
- p.cur.value = ""
- p.cur.err = eof
- }
- debugf("parser·next(): returning [%v] [err: %v] @l%d,o%d", p.cur.value, p.cur.err, p.cur.line, p.cur.offset)
- return &p.cur
-}
-
-// caseEqual reports whether the token is valid, not a quoted identifier, and
-// equal to the provided string under a case insensitive comparison.
-// Use this (or sniff/eat/expect) instead of comparing a string directly for keywords, etc.
-func (t *token) caseEqual(x string) bool {
- return t.err == nil && t.typ != quotedID && strings.EqualFold(t.value, x)
-}
-
-// sniff reports whether the next N tokens are as specified.
-func (p *parser) sniff(want ...string) bool {
- // Store current parser state and restore on the way out.
- orig := *p
- defer func() { *p = orig }()
-
- for _, w := range want {
- if !p.next().caseEqual(w) {
- return false
- }
- }
- return true
-}
-
-// sniffTokenType reports whether the next token type is as specified.
-func (p *parser) sniffTokenType(want tokenType) bool {
- orig := *p
- defer func() { *p = orig }()
-
- if p.next().typ == want {
- return true
- }
- return false
-}
-
-// eat reports whether the next N tokens are as specified,
-// then consumes them.
-func (p *parser) eat(want ...string) bool {
- // Store current parser state so we can restore if we get a failure.
- orig := *p
-
- for _, w := range want {
- if !p.next().caseEqual(w) {
- // Mismatch.
- *p = orig
- return false
- }
- }
- return true
-}
-
-func (p *parser) expect(want ...string) *parseError {
- for _, w := range want {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if !tok.caseEqual(w) {
- return p.errorf("got %q while expecting %q", tok.value, w)
- }
- }
- return nil
-}
-
-func (p *parser) parseDDLStmt() (DDLStmt, *parseError) {
- debugf("parseDDLStmt: %v", p)
-
- /*
- statement:
- { create_database | create_table | create_index | alter_table | drop_table | rename_table | drop_index | create_change_stream | alter_change_stream | drop_change_stream }
- */
-
- // TODO: support create_database
-
- if p.sniff("CREATE", "TABLE") {
- ct, err := p.parseCreateTable()
- return ct, err
- } else if p.sniff("CREATE", "INDEX") || p.sniff("CREATE", "UNIQUE", "INDEX") || p.sniff("CREATE", "NULL_FILTERED", "INDEX") || p.sniff("CREATE", "UNIQUE", "NULL_FILTERED", "INDEX") {
- ci, err := p.parseCreateIndex()
- return ci, err
- } else if p.sniff("CREATE", "VIEW") || p.sniff("CREATE", "OR", "REPLACE", "VIEW") {
- cv, err := p.parseCreateView()
- return cv, err
- } else if p.sniff("CREATE", "ROLE") {
- cr, err := p.parseCreateRole()
- return cr, err
- } else if p.sniff("ALTER", "TABLE") {
- a, err := p.parseAlterTable()
- return a, err
- } else if p.eat("DROP") {
- pos := p.Pos()
- // These statements are simple.
- // DROP TABLE [ IF EXISTS ] table_name
- // DROP INDEX [ IF EXISTS ] index_name
- // DROP VIEW view_name
- // DROP ROLE role_name
- // DROP CHANGE STREAM change_stream_name
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- default:
- return nil, p.errorf("got %q, want TABLE, VIEW, INDEX or CHANGE", tok.value)
- case tok.caseEqual("TABLE"):
- var ifExists bool
- if p.eat("IF", "EXISTS") {
- ifExists = true
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return &DropTable{Name: name, IfExists: ifExists, Position: pos}, nil
- case tok.caseEqual("INDEX"):
- var ifExists bool
- if p.eat("IF", "EXISTS") {
- ifExists = true
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return &DropIndex{Name: name, IfExists: ifExists, Position: pos}, nil
- case tok.caseEqual("VIEW"):
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return &DropView{Name: name, Position: pos}, nil
- case tok.caseEqual("ROLE"):
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return &DropRole{Name: name, Position: pos}, nil
- case tok.caseEqual("CHANGE"):
- if err := p.expect("STREAM"); err != nil {
- return nil, err
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return &DropChangeStream{Name: name, Position: pos}, nil
- case tok.caseEqual("SEQUENCE"):
- var ifExists bool
- if p.eat("IF", "EXISTS") {
- ifExists = true
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return &DropSequence{Name: name, IfExists: ifExists, Position: pos}, nil
- }
- } else if p.sniff("RENAME", "TABLE") {
- a, err := p.parseRenameTable()
- return a, err
- } else if p.sniff("ALTER", "DATABASE") {
- a, err := p.parseAlterDatabase()
- return a, err
- } else if p.eat("GRANT") {
- a, err := p.parseGrantRole()
- return a, err
- } else if p.eat("REVOKE") {
- a, err := p.parseRevokeRole()
- return a, err
- } else if p.sniff("CREATE", "CHANGE", "STREAM") {
- cs, err := p.parseCreateChangeStream()
- return cs, err
- } else if p.sniff("ALTER", "CHANGE", "STREAM") {
- acs, err := p.parseAlterChangeStream()
- return acs, err
- } else if p.sniff("ALTER", "STATISTICS") {
- as, err := p.parseAlterStatistics()
- return as, err
- } else if p.sniff("ALTER", "INDEX") {
- ai, err := p.parseAlterIndex()
- return ai, err
- } else if p.sniff("CREATE", "SEQUENCE") {
- cs, err := p.parseCreateSequence()
- return cs, err
- } else if p.sniff("ALTER", "SEQUENCE") {
- as, err := p.parseAlterSequence()
- return as, err
- }
-
- return nil, p.errorf("unknown DDL statement")
-}
-
-func (p *parser) parseCreateTable() (*CreateTable, *parseError) {
- debugf("parseCreateTable: %v", p)
-
- /*
- CREATE TABLE [ IF NOT EXISTS ] table_name(
- [column_def, ...] [ table_constraint, ...] [ synonym ] )
- primary_key [, cluster]
-
- synonym:
- SYNONYM (name)
-
- primary_key:
- PRIMARY KEY ( [key_part, ...] )
-
- cluster:
- INTERLEAVE IN PARENT table_name [ ON DELETE { CASCADE | NO ACTION } ]
- */
- var ifNotExists bool
-
- if err := p.expect("CREATE"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("TABLE"); err != nil {
- return nil, err
- }
- if p.eat("IF", "NOT", "EXISTS") {
- ifNotExists = true
- }
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- ct := &CreateTable{Name: tname, Position: pos, IfNotExists: ifNotExists}
- err = p.parseCommaList("(", ")", func(p *parser) *parseError {
- if p.sniffTableConstraint() {
- tc, err := p.parseTableConstraint()
- if err != nil {
- return err
- }
- ct.Constraints = append(ct.Constraints, tc)
- return nil
- }
-
- if p.sniffTableSynonym() {
- ts, err := p.parseTableSynonym()
- if err != nil {
- return err
- }
- ct.Synonym = ts
- return nil
- }
-
- cd, err := p.parseColumnDef()
- if err != nil {
- return err
- }
- ct.Columns = append(ct.Columns, cd)
- return nil
- })
- if err != nil {
- return nil, err
- }
-
- if err := p.expect("PRIMARY"); err != nil {
- return nil, err
- }
- if err := p.expect("KEY"); err != nil {
- return nil, err
- }
- ct.PrimaryKey, err = p.parseKeyPartList()
- if err != nil {
- return nil, err
- }
-
- if p.eat(",", "INTERLEAVE") {
- if err := p.expect("IN"); err != nil {
- return nil, err
- }
- if err := p.expect("PARENT"); err != nil {
- return nil, err
- }
- pname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- ct.Interleave = &Interleave{
- Parent: pname,
- OnDelete: NoActionOnDelete,
- }
- // The ON DELETE clause is optional; it defaults to NoActionOnDelete.
- if p.eat("ON", "DELETE") {
- od, err := p.parseOnDelete()
- if err != nil {
- return nil, err
- }
- ct.Interleave.OnDelete = od
- }
- }
- if p.eat(",", "ROW", "DELETION", "POLICY") {
- rdp, err := p.parseRowDeletionPolicy()
- if err != nil {
- return nil, err
- }
- ct.RowDeletionPolicy = &rdp
- }
-
- return ct, nil
-}
-
-func (p *parser) sniffTableConstraint() bool {
- // Unfortunately the Cloud Spanner grammar is LL(3) because
- // CONSTRAINT BOOL
- // could be the start of a declaration of a column called "CONSTRAINT" of boolean type,
- // or it could be the start of a foreign key constraint called "BOOL".
- // We have to sniff up to the third token to see what production it is.
- // If we have "FOREIGN" and "KEY" (or "CHECK"), this is an unnamed table constraint.
- // If we have "CONSTRAINT", an identifier and "FOREIGN" (or "CHECK"), this is a table constraint.
- // Otherwise, this is a column definition.
-
- if p.sniff("FOREIGN", "KEY") || p.sniff("CHECK") {
- return true
- }
-
- // Store parser state, and peek ahead.
- // Restore on the way out.
- orig := *p
- defer func() { *p = orig }()
-
- if !p.eat("CONSTRAINT") {
- return false
- }
- if _, err := p.parseTableOrIndexOrColumnName(); err != nil {
- return false
- }
- return p.sniff("FOREIGN") || p.sniff("CHECK")
-}
-
-func (p *parser) sniffTableSynonym() bool {
- return p.sniff("SYNONYM")
-}
-
-func (p *parser) parseTableSynonym() (ID, *parseError) {
- debugf("parseTableSynonym: %v", p)
-
- /*
- table_synonym:
- SYNONYM ( name )
- */
-
- if err := p.expect("SYNONYM"); err != nil {
- return "", err
- }
- if err := p.expect("("); err != nil {
- return "", err
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return "", err
- }
- if err := p.expect(")"); err != nil {
- return "", err
- }
-
- return name, nil
-}
-
-func (p *parser) parseCreateIndex() (*CreateIndex, *parseError) {
- debugf("parseCreateIndex: %v", p)
-
- /*
- CREATE [UNIQUE] [NULL_FILTERED] INDEX [IF NOT EXISTS] index_name
- ON table_name ( key_part [, ...] ) [ storing_clause ] [ , interleave_clause ]
-
- index_name:
- {a—z|A—Z}[{a—z|A—Z|0—9|_}+]
-
- storing_clause:
- STORING ( column_name [, ...] )
-
- interleave_clause:
- INTERLEAVE IN table_name
- */
-
- var unique, nullFiltered, ifNotExists bool
-
- if err := p.expect("CREATE"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if p.eat("UNIQUE") {
- unique = true
- }
- if p.eat("NULL_FILTERED") {
- nullFiltered = true
- }
- if err := p.expect("INDEX"); err != nil {
- return nil, err
- }
- if p.eat("IF", "NOT", "EXISTS") {
- ifNotExists = true
- }
- iname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- if err := p.expect("ON"); err != nil {
- return nil, err
- }
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- ci := &CreateIndex{
- Name: iname,
- Table: tname,
-
- Unique: unique,
- NullFiltered: nullFiltered,
- IfNotExists: ifNotExists,
-
- Position: pos,
- }
- ci.Columns, err = p.parseKeyPartList()
- if err != nil {
- return nil, err
- }
-
- if p.eat("STORING") {
- ci.Storing, err = p.parseColumnNameList()
- if err != nil {
- return nil, err
- }
- }
-
- if p.eat(",", "INTERLEAVE", "IN") {
- ci.Interleave, err = p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- }
-
- return ci, nil
-}
-
-func (p *parser) parseCreateView() (*CreateView, *parseError) {
- debugf("parseCreateView: %v", p)
-
- /*
- { CREATE VIEW | CREATE OR REPLACE VIEW } view_name
- SQL SECURITY {INVOKER | DEFINER}
- AS query
- */
-
- var orReplace bool
-
- if err := p.expect("CREATE"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if p.eat("OR", "REPLACE") {
- orReplace = true
- }
- if err := p.expect("VIEW"); err != nil {
- return nil, err
- }
- vname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- if err := p.expect("SQL", "SECURITY"); err != nil {
- return nil, err
- }
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- var securityType SecurityType
- switch {
- case tok.caseEqual("INVOKER"):
- securityType = Invoker
- case tok.caseEqual("DEFINER"):
- securityType = Definer
- default:
- return nil, p.errorf("got %q, want INVOKER or DEFINER", tok.value)
- }
- if err := p.expect("AS"); err != nil {
- return nil, err
- }
- query, err := p.parseQuery()
- if err != nil {
- return nil, err
- }
-
- return &CreateView{
- Name: vname,
- OrReplace: orReplace,
- SecurityType: securityType,
- Query: query,
-
- Position: pos,
- }, nil
-}
-
-func (p *parser) parseCreateRole() (*CreateRole, *parseError) {
- debugf("parseCreateRole: %v", p)
-
- /*
- CREATE ROLE database_role_name
- */
-
- if err := p.expect("CREATE"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("ROLE"); err != nil {
- return nil, err
- }
- rname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- cr := &CreateRole{
- Name: rname,
-
- Position: pos,
- }
-
- return cr, nil
-}
-
-func (p *parser) parseGrantRole() (*GrantRole, *parseError) {
- pos := p.Pos()
- g := &GrantRole{
- Position: pos,
- }
- if p.eat("ROLE") {
- roleList, err := p.parseGrantOrRevokeRoleList("TO")
- if err != nil {
- return nil, err
- }
- g.GrantRoleNames = roleList
- } else if p.eat("EXECUTE", "ON", "TABLE", "FUNCTION") {
- tvfList, err := p.parseGrantOrRevokeRoleList("TO")
- if err != nil {
- return nil, err
- }
- g.TvfNames = tvfList
- } else if p.eat("SELECT", "ON", "VIEW") {
- viewList, err := p.parseGrantOrRevokeRoleList("TO")
- if err != nil {
- return nil, err
- }
- g.ViewNames = viewList
- } else if p.eat("SELECT", "ON", "CHANGE", "STREAM") {
- csList, err := p.parseGrantOrRevokeRoleList("TO")
- if err != nil {
- return nil, err
- }
- g.ChangeStreamNames = csList
- } else {
- var privs []Privilege
- privs, err := p.parsePrivileges()
- if err != nil {
- return nil, err
- }
- g.Privileges = privs
- var tableList []ID
- f := func(p *parser) *parseError {
- table, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return err
- }
- tableList = append(tableList, table)
- return nil
- }
- if err := p.parseCommaListWithEnds(f, "TO", "ROLE"); err != nil {
- return nil, err
- }
- g.TableNames = tableList
- }
- list, err := p.parseIDList()
- if err != nil {
- return nil, err
- }
- g.ToRoleNames = list
-
- return g, nil
-}
-
-func (p *parser) parseRevokeRole() (*RevokeRole, *parseError) {
- pos := p.Pos()
- r := &RevokeRole{
- Position: pos,
- }
- if p.eat("ROLE") {
- roleList, err := p.parseGrantOrRevokeRoleList("FROM")
- if err != nil {
- return nil, err
- }
- r.RevokeRoleNames = roleList
- } else if p.eat("EXECUTE", "ON", "TABLE", "FUNCTION") {
- tvfList, err := p.parseGrantOrRevokeRoleList("FROM")
- if err != nil {
- return nil, err
- }
- r.TvfNames = tvfList
- } else if p.eat("SELECT", "ON", "VIEW") {
- viewList, err := p.parseGrantOrRevokeRoleList("FROM")
- if err != nil {
- return nil, err
- }
- r.ViewNames = viewList
- } else if p.eat("SELECT", "ON", "CHANGE", "STREAM") {
- csList, err := p.parseGrantOrRevokeRoleList("FROM")
- if err != nil {
- return nil, err
- }
- r.ChangeStreamNames = csList
- } else {
- var privs []Privilege
- privs, err := p.parsePrivileges()
- if err != nil {
- return nil, err
- }
- r.Privileges = privs
- var tableList []ID
- f := func(p *parser) *parseError {
- table, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return err
- }
- tableList = append(tableList, table)
- return nil
- }
- if err := p.parseCommaListWithEnds(f, "FROM", "ROLE"); err != nil {
- return nil, err
- }
- r.TableNames = tableList
- }
- list, err := p.parseIDList()
- if err != nil {
- return nil, err
- }
- r.FromRoleNames = list
-
- return r, nil
-}
-func (p *parser) parseGrantOrRevokeRoleList(end string) ([]ID, *parseError) {
- var roleList []ID
- f := func(p *parser) *parseError {
- role, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return err
- }
- roleList = append(roleList, role)
- return nil
- }
- err := p.parseCommaListWithEnds(f, end, "ROLE")
- if err != nil {
- return nil, err
- }
- return roleList, nil
-}
-
-func (p *parser) parsePrivileges() ([]Privilege, *parseError) {
- var privs []Privilege
- for {
- tok := p.next()
- if tok.err != nil {
- return []Privilege{}, tok.err
- }
-
- priv := Privilege{}
- switch {
- default:
- return []Privilege{}, p.errorf("got %q, want SELECT or UPDATE or INSERT or DELETE", tok.value)
- case tok.caseEqual("SELECT"):
- priv.Type = PrivilegeTypeSelect
- case tok.caseEqual("UPDATE"):
- priv.Type = PrivilegeTypeUpdate
- case tok.caseEqual("INSERT"):
- priv.Type = PrivilegeTypeInsert
- case tok.caseEqual("DELETE"):
- priv.Type = PrivilegeTypeDelete
- }
- // can grant DELETE only at the table level.
- // https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#notes_and_restrictions
- if p.sniff("(") && !tok.caseEqual("DELETE") {
- list, err := p.parseColumnNameList()
- if err != nil {
- return nil, err
- }
- priv.Columns = list
- }
- privs = append(privs, priv)
- tok = p.next()
- if tok.err != nil {
- return []Privilege{}, tok.err
- }
- if tok.value == "," {
- continue
- } else if tok.caseEqual("ON") && p.eat("TABLE") {
- break
- } else {
- return []Privilege{}, p.errorf("got %q, want , or ON TABLE", tok.value)
- }
- }
- return privs, nil
-}
-func (p *parser) parseAlterTable() (*AlterTable, *parseError) {
- debugf("parseAlterTable: %v", p)
-
- /*
- alter_table:
- ALTER TABLE table_name { table_alteration | table_column_alteration }
-
- table_alteration:
- { ADD [ COLUMN ] [ IF NOT EXISTS ] column_def
- | DROP [ COLUMN ] column_name
- | ADD table_constraint
- | DROP CONSTRAINT constraint_name
- | SET ON DELETE { CASCADE | NO ACTION }
- | ADD SYNONYM synonym_name
- | DROP SYNONYM synonym_name
- | RENAME TO new_table_name }
-
- table_column_alteration:
- ALTER [ COLUMN ] column_name { { scalar_type | array_type } [NOT NULL] | SET options_def }
- */
-
- if err := p.expect("ALTER"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("TABLE"); err != nil {
- return nil, err
- }
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a := &AlterTable{Name: tname, Position: pos}
-
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- default:
- return nil, p.errorf("got %q, expected ADD or DROP or SET or ALTER", tok.value)
- case tok.caseEqual("ADD"):
- if p.sniff("CONSTRAINT") || p.sniff("FOREIGN") || p.sniff("CHECK") {
- tc, err := p.parseTableConstraint()
- if err != nil {
- return nil, err
- }
- a.Alteration = AddConstraint{Constraint: tc}
- return a, nil
- }
-
- if p.eat("ROW", "DELETION", "POLICY") {
- rdp, err := p.parseRowDeletionPolicy()
- if err != nil {
- return nil, err
- }
- a.Alteration = AddRowDeletionPolicy{RowDeletionPolicy: rdp}
- return a, nil
- }
-
- // TODO: "COLUMN" is optional. A column named SYNONYM is allowed.
- if p.eat("SYNONYM") {
- synonym, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a.Alteration = AddSynonym{Name: synonym}
- return a, nil
- }
-
- // TODO: "COLUMN" is optional.
- if err := p.expect("COLUMN"); err != nil {
- return nil, err
- }
- var ifNotExists bool
- if p.eat("IF", "NOT", "EXISTS") {
- ifNotExists = true
- }
- cd, err := p.parseColumnDef()
- if err != nil {
- return nil, err
- }
- a.Alteration = AddColumn{Def: cd, IfNotExists: ifNotExists}
- return a, nil
- case tok.caseEqual("DROP"):
- if p.eat("CONSTRAINT") {
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a.Alteration = DropConstraint{Name: name}
- return a, nil
- }
-
- if p.eat("ROW", "DELETION", "POLICY") {
- a.Alteration = DropRowDeletionPolicy{}
- return a, nil
- }
-
- // TODO: "COLUMN" is optional. A column named SYNONYM is allowed.
- if p.eat("SYNONYM") {
- synonym, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a.Alteration = DropSynonym{Name: synonym}
- return a, nil
- }
-
- // TODO: "COLUMN" is optional.
- if err := p.expect("COLUMN"); err != nil {
- return nil, err
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a.Alteration = DropColumn{Name: name}
- return a, nil
- case tok.caseEqual("SET"):
- if err := p.expect("ON"); err != nil {
- return nil, err
- }
- if err := p.expect("DELETE"); err != nil {
- return nil, err
- }
- od, err := p.parseOnDelete()
- if err != nil {
- return nil, err
- }
- a.Alteration = SetOnDelete{Action: od}
- return a, nil
- case tok.caseEqual("ALTER"):
- // TODO: "COLUMN" is optional.
- if err := p.expect("COLUMN"); err != nil {
- return nil, err
- }
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- ca, err := p.parseColumnAlteration()
- if err != nil {
- return nil, err
- }
- a.Alteration = AlterColumn{
- Name: name,
- Alteration: ca,
- }
- return a, nil
- case tok.caseEqual("REPLACE"):
- if p.eat("ROW", "DELETION", "POLICY") {
- rdp, err := p.parseRowDeletionPolicy()
- if err != nil {
- return nil, err
- }
- a.Alteration = ReplaceRowDeletionPolicy{RowDeletionPolicy: rdp}
- return a, nil
- }
- case tok.caseEqual("RENAME"):
- if p.eat("TO") {
- newName, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- rt := RenameTo{ToName: newName}
- if p.eat(",", "ADD", "SYNONYM") {
- synonym, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- rt.Synonym = synonym
- }
- a.Alteration = rt
- return a, nil
- }
- }
- return a, nil
-}
-
-func (p *parser) parseRenameTable() (*RenameTable, *parseError) {
- debugf("parseRenameTable: %v", p)
-
- /*
- RENAME TABLE table_name TO new_name [, table_name2 TO new_name2, ...]
- */
-
- if err := p.expect("RENAME"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("TABLE"); err != nil {
- return nil, err
- }
- rt := &RenameTable{
- Position: pos,
- }
-
- var renameOps []TableRenameOp
- for {
- fromName, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- if err := p.expect("TO"); err != nil {
- return nil, err
- }
- toName, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- renameOps = append(renameOps, TableRenameOp{FromName: fromName, ToName: toName})
-
- tok := p.next()
- if tok.err != nil {
- if tok.err == eof {
- break
- }
- return nil, tok.err
- } else if tok.value == "," {
- continue
- } else if tok.value == ";" {
- break
- } else {
- return nil, p.errorf("unexpected token %q", tok.value)
- }
- }
- rt.TableRenameOps = renameOps
- return rt, nil
-}
-
-func (p *parser) parseAlterDatabase() (*AlterDatabase, *parseError) {
- debugf("parseAlterDatabase: %v", p)
-
- /*
- ALTER DATABASE database_id
- action
-
- where database_id is:
- {a—z}[{a—z|0—9|_|-}+]{a—z|0—9}
-
- and action is:
- SET OPTIONS ( optimizer_version = { 1 ... 2 | null },
- version_retention_period = { 'duration' | null } )
- */
-
- if err := p.expect("ALTER"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("DATABASE"); err != nil {
- return nil, err
- }
- // This is not 100% correct as database identifiers have slightly more
- // restrictions than table names, but the restrictions are currently not
- // applied in the spansql parser.
- // TODO: Apply restrictions for all identifiers.
- dbname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a := &AlterDatabase{Name: dbname, Position: pos}
-
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- default:
- return nil, p.errorf("got %q, expected SET", tok.value)
- case tok.caseEqual("SET"):
- options, err := p.parseDatabaseOptions()
- if err != nil {
- return nil, err
- }
- a.Alteration = SetDatabaseOptions{Options: options}
- return a, nil
- }
-}
-
-func (p *parser) parseDMLStmt() (DMLStmt, *parseError) {
- debugf("parseDMLStmt: %v", p)
-
- /*
- DELETE [FROM] target_name [[AS] alias]
- WHERE condition
-
- UPDATE target_name [[AS] alias]
- SET update_item [, ...]
- WHERE condition
-
- update_item: path_expression = expression | path_expression = DEFAULT
-
- INSERT [INTO] target_name
- (column_name_1 [, ..., column_name_n] )
- input
-
- input:
- VALUES (row_1_column_1_expr [, ..., row_1_column_n_expr ] )
- [, ..., (row_k_column_1_expr [, ..., row_k_column_n_expr ] ) ]
- | select_query
-
- expr: value_expression | DEFAULT
- */
-
- if p.eat("DELETE") {
- p.eat("FROM") // optional
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- // TODO: parse alias.
- if err := p.expect("WHERE"); err != nil {
- return nil, err
- }
- where, err := p.parseBoolExpr()
- if err != nil {
- return nil, err
- }
- return &Delete{
- Table: tname,
- Where: where,
- }, nil
- }
-
- if p.eat("UPDATE") {
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- u := &Update{
- Table: tname,
- }
- // TODO: parse alias.
- if err := p.expect("SET"); err != nil {
- return nil, err
- }
- for {
- ui, err := p.parseUpdateItem()
- if err != nil {
- return nil, err
- }
- u.Items = append(u.Items, ui)
- if p.eat(",") {
- continue
- }
- break
- }
- if err := p.expect("WHERE"); err != nil {
- return nil, err
- }
- where, err := p.parseBoolExpr()
- if err != nil {
- return nil, err
- }
- u.Where = where
- return u, nil
- }
-
- if p.eat("INSERT") {
- p.eat("INTO") // optional
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- columns, err := p.parseColumnNameList()
- if err != nil {
- return nil, err
- }
-
- var input ValuesOrSelect
- if p.eat("VALUES") {
- values := make([][]Expr, 0)
- for {
- exprs, err := p.parseParenExprList()
- if err != nil {
- return nil, err
- }
- values = append(values, exprs)
- if !p.eat(",") {
- break
- }
- }
- input = Values(values)
- } else {
- input, err = p.parseSelect()
- if err != nil {
- return nil, err
- }
- }
-
- return &Insert{
- Table: tname,
- Columns: columns,
- Input: input,
- }, nil
- }
-
- return nil, p.errorf("unknown DML statement")
-}
-
-func (p *parser) parseUpdateItem() (UpdateItem, *parseError) {
- col, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return UpdateItem{}, err
- }
- ui := UpdateItem{
- Column: col,
- }
- if err := p.expect("="); err != nil {
- return UpdateItem{}, err
- }
- if p.eat("DEFAULT") {
- return ui, nil
- }
- ui.Value, err = p.parseExpr()
- if err != nil {
- return UpdateItem{}, err
- }
- return ui, nil
-}
-
-func (p *parser) parseColumnDef() (ColumnDef, *parseError) {
- debugf("parseColumnDef: %v", p)
-
- /*
- column_def:
- column_name {scalar_type | array_type} [NOT NULL] [{DEFAULT ( expression ) | AS ( expression ) STORED}] [options_def]
- */
-
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return ColumnDef{}, err
- }
-
- cd := ColumnDef{Name: name, Position: p.Pos()}
-
- cd.Type, err = p.parseType()
- if err != nil {
- return ColumnDef{}, err
- }
-
- if p.eat("NOT", "NULL") {
- cd.NotNull = true
- }
-
- if p.eat("DEFAULT", "(") {
- cd.Default, err = p.parseExpr()
- if err != nil {
- return ColumnDef{}, err
- }
- if err := p.expect(")"); err != nil {
- return ColumnDef{}, err
- }
- }
-
- if p.eat("AS", "(") {
- cd.Generated, err = p.parseExpr()
- if err != nil {
- return ColumnDef{}, err
- }
- if err := p.expect(")"); err != nil {
- return ColumnDef{}, err
- }
- if err := p.expect("STORED"); err != nil {
- return ColumnDef{}, err
- }
- }
-
- if p.sniff("OPTIONS") {
- cd.Options, err = p.parseColumnOptions()
- if err != nil {
- return ColumnDef{}, err
- }
- }
-
- return cd, nil
-}
-
-func (p *parser) parseColumnAlteration() (ColumnAlteration, *parseError) {
- debugf("parseColumnAlteration: %v", p)
- /*
- {
- data_type [ NOT NULL ] [ DEFAULT ( expression ) ]
- | SET ( options_def )
- | SET DEFAULT ( expression )
- | DROP DEFAULT
- }
- */
-
- if p.eat("SET", "DEFAULT", "(") {
- d, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- if err := p.expect(")"); err != nil {
- return nil, err
- }
- return SetDefault{Default: d}, nil
- }
-
- if p.eat("DROP", "DEFAULT") {
- return DropDefault{}, nil
- }
-
- if p.eat("SET") {
- co, err := p.parseColumnOptions()
- if err != nil {
- return nil, err
- }
- return SetColumnOptions{Options: co}, nil
- }
-
- typ, err := p.parseType()
- if err != nil {
- return nil, err
- }
- sct := SetColumnType{Type: typ}
-
- if p.eat("NOT", "NULL") {
- sct.NotNull = true
- }
-
- if p.eat("DEFAULT", "(") {
- sct.Default, err = p.parseExpr()
- if err != nil {
- return nil, err
- }
- if err := p.expect(")"); err != nil {
- return nil, err
- }
- }
-
- return sct, nil
-}
-
-func (p *parser) parseColumnOptions() (ColumnOptions, *parseError) {
- debugf("parseColumnOptions: %v", p)
- /*
- options_def:
- OPTIONS (allow_commit_timestamp = { true | null })
- */
-
- if err := p.expect("OPTIONS"); err != nil {
- return ColumnOptions{}, err
- }
- if err := p.expect("("); err != nil {
- return ColumnOptions{}, err
- }
-
- // TODO: Figure out if column options are case insensitive.
- // We ignore case for the key (because it is easier) but not the value.
- var co ColumnOptions
- if p.eat("allow_commit_timestamp", "=") {
- tok := p.next()
- if tok.err != nil {
- return ColumnOptions{}, tok.err
- }
- allowCommitTimestamp := new(bool)
- switch tok.value {
- case "true":
- *allowCommitTimestamp = true
- case "null":
- *allowCommitTimestamp = false
- default:
- return ColumnOptions{}, p.errorf("got %q, want true or null", tok.value)
- }
- co.AllowCommitTimestamp = allowCommitTimestamp
- }
-
- if err := p.expect(")"); err != nil {
- return ColumnOptions{}, err
- }
-
- return co, nil
-}
-
-func (p *parser) parseDatabaseOptions() (DatabaseOptions, *parseError) {
- debugf("parseDatabaseOptions: %v", p)
- /*
- options_def:
- OPTIONS (enable_key_visualizer = { true | null },
- optimizer_version = { 1 ... 2 | null },
- version_retention_period = { 'duration' | null })
- */
-
- if err := p.expect("OPTIONS"); err != nil {
- return DatabaseOptions{}, err
- }
- if err := p.expect("("); err != nil {
- return DatabaseOptions{}, err
- }
-
- // We ignore case for the key (because it is easier) but not the value.
- var opts DatabaseOptions
- for {
- if p.eat("enable_key_visualizer", "=") {
- tok := p.next()
- if tok.err != nil {
- return DatabaseOptions{}, tok.err
- }
- enableKeyVisualizer := new(bool)
- switch tok.value {
- case "true":
- *enableKeyVisualizer = true
- case "null":
- *enableKeyVisualizer = false
- default:
- return DatabaseOptions{}, p.errorf("invalid enable_key_visualizer_value: %v", tok.value)
- }
- opts.EnableKeyVisualizer = enableKeyVisualizer
- } else if p.eat("optimizer_version", "=") {
- tok := p.next()
- if tok.err != nil {
- return DatabaseOptions{}, tok.err
- }
- optimizerVersion := new(int)
- if tok.value == "null" {
- *optimizerVersion = 0
- } else {
- if tok.typ != int64Token {
- return DatabaseOptions{}, p.errorf("invalid optimizer_version value: %v", tok.value)
- }
- version, err := strconv.Atoi(tok.value)
- if err != nil {
- return DatabaseOptions{}, p.errorf("invalid optimizer_version value: %v", tok.value)
- }
- *optimizerVersion = version
- }
- opts.OptimizerVersion = optimizerVersion
- } else if p.eat("optimizer_statistics_package", "=") {
- tok := p.next()
- if tok.err != nil {
- return DatabaseOptions{}, tok.err
- }
- optimizerStatisticsPackage := new(string)
- if tok.value == "null" {
- *optimizerStatisticsPackage = ""
- } else {
- if tok.typ != stringToken {
- return DatabaseOptions{}, p.errorf("invalid optimizer_statistics_package: %v", tok.value)
- }
- *optimizerStatisticsPackage = tok.string
- }
- opts.OptimizerStatisticsPackage = optimizerStatisticsPackage
- } else if p.eat("version_retention_period", "=") {
- tok := p.next()
- if tok.err != nil {
- return DatabaseOptions{}, tok.err
- }
- retentionPeriod := new(string)
- if tok.value == "null" {
- *retentionPeriod = ""
- } else {
- if tok.typ != stringToken {
- return DatabaseOptions{}, p.errorf("invalid version_retention_period: %v", tok.value)
- }
- *retentionPeriod = tok.string
- }
- opts.VersionRetentionPeriod = retentionPeriod
- } else if p.eat("default_leader", "=") {
- tok := p.next()
- if tok.err != nil {
- return DatabaseOptions{}, tok.err
- }
- defaultLeader := new(string)
- if tok.value == "null" {
- *defaultLeader = ""
- } else {
- if tok.typ != stringToken {
- return DatabaseOptions{}, p.errorf("invalid default_leader: %v", tok.value)
- }
- *defaultLeader = tok.string
- }
- opts.DefaultLeader = defaultLeader
- } else {
- tok := p.next()
- return DatabaseOptions{}, p.errorf("unknown database option: %v", tok.value)
- }
- if p.sniff(")") {
- break
- }
- if !p.eat(",") {
- return DatabaseOptions{}, p.errorf("missing ',' in options list")
- }
- }
- if err := p.expect(")"); err != nil {
- return DatabaseOptions{}, err
- }
-
- return opts, nil
-}
-
-func (p *parser) parseKeyPartList() ([]KeyPart, *parseError) {
- var list []KeyPart
- err := p.parseCommaList("(", ")", func(p *parser) *parseError {
- kp, err := p.parseKeyPart()
- if err != nil {
- return err
- }
- list = append(list, kp)
- return nil
- })
- return list, err
-}
-
-func (p *parser) parseKeyPart() (KeyPart, *parseError) {
- debugf("parseKeyPart: %v", p)
-
- /*
- key_part:
- column_name [{ ASC | DESC }]
- */
-
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return KeyPart{}, err
- }
-
- kp := KeyPart{Column: name}
-
- if p.eat("ASC") {
- // OK.
- } else if p.eat("DESC") {
- kp.Desc = true
- }
-
- return kp, nil
-}
-
-func (p *parser) parseTableConstraint() (TableConstraint, *parseError) {
- debugf("parseTableConstraint: %v", p)
-
- /*
- table_constraint:
- [ CONSTRAINT constraint_name ]
- { check | foreign_key }
- */
-
- if p.eat("CONSTRAINT") {
- pos := p.Pos()
- // Named constraint.
- cname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return TableConstraint{}, err
- }
- c, err := p.parseConstraint()
- if err != nil {
- return TableConstraint{}, err
- }
- return TableConstraint{
- Name: cname,
- Constraint: c,
- Position: pos,
- }, nil
- }
-
- // Unnamed constraint.
- c, err := p.parseConstraint()
- if err != nil {
- return TableConstraint{}, err
- }
- return TableConstraint{
- Constraint: c,
- Position: c.Pos(),
- }, nil
-}
-
-func (p *parser) parseConstraint() (Constraint, *parseError) {
- if p.sniff("FOREIGN") {
- fk, err := p.parseForeignKey()
- return fk, err
- }
- c, err := p.parseCheck()
- return c, err
-}
-
-func (p *parser) parseForeignKey() (ForeignKey, *parseError) {
- debugf("parseForeignKey: %v", p)
-
- /*
- foreign_key:
- FOREIGN KEY ( column_name [, ... ] ) REFERENCES ref_table ( ref_column [, ... ] ) [ ON DELETE { CASCADE | NO ACTION } ]
- */
-
- if err := p.expect("FOREIGN"); err != nil {
- return ForeignKey{}, err
- }
- fk := ForeignKey{Position: p.Pos()}
- if err := p.expect("KEY"); err != nil {
- return ForeignKey{}, err
- }
- var err *parseError
- fk.Columns, err = p.parseColumnNameList()
- if err != nil {
- return ForeignKey{}, err
- }
- if err := p.expect("REFERENCES"); err != nil {
- return ForeignKey{}, err
- }
- fk.RefTable, err = p.parseTableOrIndexOrColumnName()
- if err != nil {
- return ForeignKey{}, err
- }
- fk.RefColumns, err = p.parseColumnNameList()
- if err != nil {
- return ForeignKey{}, err
- }
- // The ON DELETE clause is optional; it defaults to NoActionOnDelete.
- fk.OnDelete = NoActionOnDelete
- if p.eat("ON", "DELETE") {
- fk.OnDelete, err = p.parseOnDelete()
- if err != nil {
- return ForeignKey{}, err
- }
- }
- return fk, nil
-}
-
-func (p *parser) parseCheck() (Check, *parseError) {
- debugf("parseCheck: %v", p)
-
- /*
- check:
- CHECK ( expression )
- */
-
- if err := p.expect("CHECK"); err != nil {
- return Check{}, err
- }
- c := Check{Position: p.Pos()}
- if err := p.expect("("); err != nil {
- return Check{}, err
- }
- var err *parseError
- c.Expr, err = p.parseBoolExpr()
- if err != nil {
- return Check{}, err
- }
- if err := p.expect(")"); err != nil {
- return Check{}, err
- }
- return c, nil
-}
-
-func (p *parser) parseColumnNameList() ([]ID, *parseError) {
- var list []ID
- err := p.parseCommaList("(", ")", func(p *parser) *parseError {
- n, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return err
- }
- list = append(list, n)
- return nil
- })
- return list, err
-}
-
-func (p *parser) parseIDList() ([]ID, *parseError) {
- var list []ID
- for {
- n, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- list = append(list, n)
-
- if p.eat(",") {
- continue
- }
- break
- }
- return list, nil
-}
-
-func (p *parser) parseCreateChangeStream() (*CreateChangeStream, *parseError) {
- debugf("parseCreateChangeStream: %v", p)
-
- /*
- CREATE CHANGE STREAM change_stream_name
- [FOR column_or_table_watching_definition[, ... ] ]
- [
- OPTIONS (
- retention_period = timespan,
- value_capture_type = type
- )
- ]
- */
- if err := p.expect("CREATE"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("CHANGE"); err != nil {
- return nil, err
- }
- if err := p.expect("STREAM"); err != nil {
- return nil, err
- }
- csname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- cs := &CreateChangeStream{Name: csname, Position: pos}
-
- if p.sniff("FOR") {
- watch, watchAllTables, err := p.parseChangeStreamWatches()
- if err != nil {
- return nil, err
- }
- cs.Watch = watch
- cs.WatchAllTables = watchAllTables
- }
-
- if p.sniff("OPTIONS") {
- cs.Options, err = p.parseChangeStreamOptions()
- if err != nil {
- return nil, err
- }
- }
-
- return cs, nil
-}
-
-func (p *parser) parseAlterChangeStream() (*AlterChangeStream, *parseError) {
- debugf("parseAlterChangeStream: %v", p)
-
- if err := p.expect("ALTER"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("CHANGE"); err != nil {
- return nil, err
- }
- if err := p.expect("STREAM"); err != nil {
- return nil, err
- }
- csname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- acs := &AlterChangeStream{Name: csname, Position: pos}
-
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- default:
- return nil, p.errorf("got %q, expected SET or DROP", tok.value)
- case tok.caseEqual("SET"):
- if p.sniff("OPTIONS") {
- options, err := p.parseChangeStreamOptions()
- if err != nil {
- return nil, err
- }
- acs.Alteration = AlterChangeStreamOptions{Options: options}
- return acs, nil
- }
- if p.sniff("FOR") {
- watch, watchAllTables, err := p.parseChangeStreamWatches()
- if err != nil {
- return nil, err
- }
- acs.Alteration = AlterWatch{Watch: watch, WatchAllTables: watchAllTables}
- return acs, nil
- }
- return nil, p.errorf("got %q, expected FOR or OPTIONS", p.next())
- case tok.caseEqual("DROP"):
- if err := p.expect("FOR", "ALL"); err != nil {
- return nil, err
- }
- acs.Alteration = DropChangeStreamWatch{}
- return acs, nil
- }
-}
-
-func (p *parser) parseChangeStreamWatches() ([]WatchDef, bool, *parseError) {
- debugf("parseChangeStreamWatches: %v", p)
-
- if err := p.expect("FOR"); err != nil {
- return nil, false, err
- }
-
- if p.eat("ALL") {
- return nil, true, nil
- }
-
- watchDefs := []WatchDef{}
- for {
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, false, err
- }
- pos := p.Pos()
- wd := WatchDef{Table: tname, Position: pos}
-
- if p.sniff("(") {
- columns, err := p.parseColumnNameList()
- if err != nil {
- return nil, false, err
- }
- wd.Columns = columns
- } else {
- wd.WatchAllCols = true
- }
-
- watchDefs = append(watchDefs, wd)
- if p.eat(",") {
- continue
- }
- break
- }
-
- return watchDefs, false, nil
-}
-
-func (p *parser) parseChangeStreamOptions() (ChangeStreamOptions, *parseError) {
- debugf("parseChangeStreamOptions: %v", p)
- /*
- options_def:
- OPTIONS (
- retention_period = timespan,
- value_capture_type = type
- ) */
-
- if err := p.expect("OPTIONS"); err != nil {
- return ChangeStreamOptions{}, err
- }
- if err := p.expect("("); err != nil {
- return ChangeStreamOptions{}, err
- }
-
- var cso ChangeStreamOptions
- for {
- if p.eat("retention_period", "=") {
- tok := p.next()
- if tok.err != nil {
- return ChangeStreamOptions{}, tok.err
- }
- retentionPeriod := new(string)
- if tok.value == "null" {
- *retentionPeriod = ""
- } else {
- if tok.typ != stringToken {
- return ChangeStreamOptions{}, p.errorf("invalid retention_period: %v", tok.value)
- }
- *retentionPeriod = tok.string
- }
- cso.RetentionPeriod = retentionPeriod
- } else if p.eat("value_capture_type", "=") {
- tok := p.next()
- if tok.err != nil {
- return ChangeStreamOptions{}, tok.err
- }
- valueCaptureType := new(string)
- if tok.typ != stringToken {
- return ChangeStreamOptions{}, p.errorf("invalid value_capture_type: %v", tok.value)
- }
- *valueCaptureType = tok.string
- cso.ValueCaptureType = valueCaptureType
- } else {
- tok := p.next()
- return ChangeStreamOptions{}, p.errorf("unknown change stream option: %v", tok.value)
- }
- if p.sniff(")") {
- break
- }
- if !p.eat(",") {
- return ChangeStreamOptions{}, p.errorf("missing ',' in options list")
- }
- }
-
- if err := p.expect(")"); err != nil {
- return ChangeStreamOptions{}, err
- }
-
- return cso, nil
-}
-
-func (p *parser) parseAlterStatistics() (*AlterStatistics, *parseError) {
- debugf("parseAlterStatistics: %v", p)
-
- /*
- ALTER STATISTICS package_name
- action
-
- where package_name is:
- {a—z}[{a—z|0—9|_|-}+]{a—z|0—9}
-
- and action is:
- SET OPTIONS ( options_def )
-
- and options_def is:
- { allow_gc = { true | false } }
- */
-
- if err := p.expect("ALTER"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("STATISTICS"); err != nil {
- return nil, err
- }
- // This is not 100% correct as package_name identifiers have slightly more
- // restrictions than table names, but the restrictions are currently not
- // applied in the spansql parser.
- // TODO: Apply restrictions for all identifiers.
- dbname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a := &AlterStatistics{Name: dbname, Position: pos}
-
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- default:
- return nil, p.errorf("got %q, expected SET", tok.value)
- case tok.caseEqual("SET"):
- options, err := p.parseStatisticsOptions()
- if err != nil {
- return nil, err
- }
- a.Alteration = SetStatisticsOptions{Options: options}
- return a, nil
- }
-}
-
-func (p *parser) parseStatisticsOptions() (StatisticsOptions, *parseError) {
- debugf("parseDatabaseOptions: %v", p)
- /*
- options_def is:
- { allow_gc = { true | false } }
- */
-
- if err := p.expect("OPTIONS"); err != nil {
- return StatisticsOptions{}, err
- }
- if err := p.expect("("); err != nil {
- return StatisticsOptions{}, err
- }
-
- // We ignore case for the key (because it is easier) but not the value.
- var opts StatisticsOptions
- for {
- if p.eat("allow_gc", "=") {
- tok := p.next()
- if tok.err != nil {
- return StatisticsOptions{}, tok.err
- }
- allowGC := new(bool)
- switch tok.value {
- case "true":
- *allowGC = true
- case "false":
- *allowGC = false
- default:
- return StatisticsOptions{}, p.errorf("invalid allow_gc: %v", tok.value)
- }
- opts.AllowGC = allowGC
- } else {
- tok := p.next()
- return StatisticsOptions{}, p.errorf("unknown statistics option: %v", tok.value)
- }
- if p.sniff(")") {
- break
- }
- if !p.eat(",") {
- return StatisticsOptions{}, p.errorf("missing ',' in options list")
- }
- }
- if err := p.expect(")"); err != nil {
- return StatisticsOptions{}, err
- }
-
- return opts, nil
-}
-
-func (p *parser) parseAlterIndex() (*AlterIndex, *parseError) {
- debugf("parseAlterIndex: %v", p)
-
- if err := p.expect("ALTER"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("INDEX"); err != nil {
- return nil, err
- }
- iname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- a := &AlterIndex{Name: iname, Position: pos}
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- case tok.caseEqual("ADD"):
- if err := p.expect("STORED", "COLUMN"); err != nil {
- return nil, err
- }
- cname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a.Alteration = AddStoredColumn{Name: cname}
- return a, nil
- case tok.caseEqual("DROP"):
- if err := p.expect("STORED", "COLUMN"); err != nil {
- return nil, err
- }
- cname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- a.Alteration = DropStoredColumn{Name: cname}
- return a, nil
- }
-
- return nil, p.errorf("got %q, expected ADD or DROP", tok.value)
-}
-
-func (p *parser) parseCreateSequence() (*CreateSequence, *parseError) {
- debugf("parseCreateSequence: %v", p)
-
- /*
- CREATE SEQUENCE
- [ IF NOT EXISTS ] sequence_name
- [ OPTIONS ( sequence_options ) ]
- */
-
- if err := p.expect("CREATE"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("SEQUENCE"); err != nil {
- return nil, err
- }
- var ifNotExists bool
- if p.eat("IF", "NOT", "EXISTS") {
- ifNotExists = true
- }
- sname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- cs := &CreateSequence{Name: sname, IfNotExists: ifNotExists, Position: pos}
-
- if p.sniff("OPTIONS") {
- cs.Options, err = p.parseSequenceOptions()
- if err != nil {
- return nil, err
- }
- }
-
- return cs, nil
-}
-
-func (p *parser) parseAlterSequence() (*AlterSequence, *parseError) {
- debugf("parseAlterSequence: %v", p)
-
- /*
- ALTER SEQUENCE sequence_name
- SET OPTIONS sequence_options
- */
-
- if err := p.expect("ALTER"); err != nil {
- return nil, err
- }
- pos := p.Pos()
- if err := p.expect("SEQUENCE"); err != nil {
- return nil, err
- }
- sname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
-
- as := &AlterSequence{Name: sname, Position: pos}
-
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- default:
- return nil, p.errorf("got %q, expected SET", tok.value)
- case tok.caseEqual("SET"):
- options, err := p.parseSequenceOptions()
- if err != nil {
- return nil, err
- }
- as.Alteration = SetSequenceOptions{Options: options}
- return as, nil
- }
-}
-
-func (p *parser) parseSequenceOptions() (SequenceOptions, *parseError) {
- debugf("parseSequenceOptions: %v", p)
-
- if err := p.expect("OPTIONS", "("); err != nil {
- return SequenceOptions{}, err
- }
-
- // We ignore case for the key (because it is easier) but not the value.
- var so SequenceOptions
- for {
- if p.eat("sequence_kind", "=") {
- tok := p.next()
- if tok.err != nil {
- return SequenceOptions{}, tok.err
- }
- if tok.typ != stringToken {
- return SequenceOptions{}, p.errorf("invalid sequence_kind value: %v", tok.value)
- }
- sequenceKind := tok.string
- so.SequenceKind = &sequenceKind
- } else if p.eat("skip_range_min", "=") {
- tok := p.next()
- if tok.err != nil {
- return SequenceOptions{}, tok.err
- }
- if tok.typ != int64Token {
- return SequenceOptions{}, p.errorf("invalid skip_range_min value: %v", tok.value)
- }
- value, err := strconv.Atoi(tok.value)
- if err != nil {
- return SequenceOptions{}, p.errorf("invalid skip_range_min value: %v", tok.value)
- }
- so.SkipRangeMin = &value
- } else if p.eat("skip_range_max", "=") {
- tok := p.next()
- if tok.err != nil {
- return SequenceOptions{}, tok.err
- }
- if tok.typ != int64Token {
- return SequenceOptions{}, p.errorf("invalid skip_range_max value: %v", tok.value)
- }
- value, err := strconv.Atoi(tok.value)
- if err != nil {
- return SequenceOptions{}, p.errorf("invalid skip_range_max value: %v", tok.value)
- }
- so.SkipRangeMax = &value
- } else if p.eat("start_with_counter", "=") {
- tok := p.next()
- if tok.err != nil {
- return SequenceOptions{}, tok.err
- }
- if tok.typ != int64Token {
- return SequenceOptions{}, p.errorf("invalid start_with_counter value: %v", tok.value)
- }
- value, err := strconv.Atoi(tok.value)
- if err != nil {
- return SequenceOptions{}, p.errorf("invalid start_with_counter value: %v", tok.value)
- }
- so.StartWithCounter = &value
- } else {
- tok := p.next()
- return SequenceOptions{}, p.errorf("unknown sequence option: %v", tok.value)
- }
- if p.sniff(")") {
- break
- }
- if !p.eat(",") {
- return SequenceOptions{}, p.errorf("missing ',' in options list")
- }
- }
- if err := p.expect(")"); err != nil {
- return SequenceOptions{}, err
- }
-
- return so, nil
-}
-
-var baseTypes = map[string]TypeBase{
- "BOOL": Bool,
- "INT64": Int64,
- "FLOAT64": Float64,
- "NUMERIC": Numeric,
- "STRING": String,
- "BYTES": Bytes,
- "DATE": Date,
- "TIMESTAMP": Timestamp,
- "JSON": JSON,
-}
-
-func (p *parser) parseBaseType() (Type, *parseError) {
- return p.parseBaseOrParameterizedType(false)
-}
-
-func (p *parser) parseType() (Type, *parseError) {
- return p.parseBaseOrParameterizedType(true)
-}
-
-var extractPartTypes = map[string]TypeBase{
- "DAY": Int64,
- "MONTH": Int64,
- "YEAR": Int64,
- "DATE": Date,
-}
-
-func (p *parser) parseExtractType() (Type, string, *parseError) {
- var t Type
- tok := p.next()
- if tok.err != nil {
- return Type{}, "", tok.err
- }
- base, ok := extractPartTypes[strings.ToUpper(tok.value)] // valid part types for EXTRACT is keyed by upper case strings.
- if !ok {
- return Type{}, "", p.errorf("got %q, want valid EXTRACT types", tok.value)
- }
- t.Base = base
- return t, strings.ToUpper(tok.value), nil
-}
-
-func (p *parser) parseBaseOrParameterizedType(withParam bool) (Type, *parseError) {
- debugf("parseBaseOrParameterizedType: %v", p)
-
- /*
- array_type:
- ARRAY< scalar_type >
-
- scalar_type:
- { BOOL | INT64 | FLOAT64 | NUMERIC | STRING( length ) | BYTES( length ) | DATE | TIMESTAMP | JSON }
- length:
- { int64_value | MAX }
- */
-
- var t Type
-
- tok := p.next()
- if tok.err != nil {
- return Type{}, tok.err
- }
- if tok.caseEqual("ARRAY") {
- t.Array = true
- if err := p.expect("<"); err != nil {
- return Type{}, err
- }
- tok = p.next()
- if tok.err != nil {
- return Type{}, tok.err
- }
- }
- base, ok := baseTypes[strings.ToUpper(tok.value)] // baseTypes is keyed by upper case strings.
- if !ok {
- return Type{}, p.errorf("got %q, want scalar type", tok.value)
- }
- t.Base = base
-
- if withParam && (t.Base == String || t.Base == Bytes) {
- if err := p.expect("("); err != nil {
- return Type{}, err
- }
-
- tok = p.next()
- if tok.err != nil {
- return Type{}, tok.err
- }
- if tok.caseEqual("MAX") {
- t.Len = MaxLen
- } else if tok.typ == int64Token {
- n, err := strconv.ParseInt(tok.value, tok.int64Base, 64)
- if err != nil {
- return Type{}, p.errorf("%v", err)
- }
- t.Len = n
- } else {
- return Type{}, p.errorf("got %q, want MAX or int64", tok.value)
- }
-
- if err := p.expect(")"); err != nil {
- return Type{}, err
- }
- }
-
- if t.Array {
- if err := p.expect(">"); err != nil {
- return Type{}, err
- }
- }
-
- return t, nil
-}
-
-func (p *parser) parseQuery() (Query, *parseError) {
- debugf("parseQuery: %v", p)
-
- /*
- query_statement:
- [ table_hint_expr ][ join_hint_expr ]
- query_expr
-
- query_expr:
- { select | ( query_expr ) | query_expr set_op query_expr }
- [ ORDER BY expression [{ ASC | DESC }] [, ...] ]
- [ LIMIT count [ OFFSET skip_rows ] ]
- */
-
- // TODO: sub-selects, etc.
-
- if err := p.expect("SELECT"); err != nil {
- return Query{}, err
- }
- p.back()
- sel, err := p.parseSelect()
- if err != nil {
- return Query{}, err
- }
- q := Query{Select: sel}
-
- if p.eat("ORDER", "BY") {
- for {
- o, err := p.parseOrder()
- if err != nil {
- return Query{}, err
- }
- q.Order = append(q.Order, o)
-
- if !p.eat(",") {
- break
- }
- }
- }
-
- if p.eat("LIMIT") {
- // "only literal or parameter values"
- // https://cloud.google.com/spanner/docs/query-syntax#limit-clause-and-offset-clause
-
- lim, err := p.parseLiteralOrParam()
- if err != nil {
- return Query{}, err
- }
- q.Limit = lim
-
- if p.eat("OFFSET") {
- off, err := p.parseLiteralOrParam()
- if err != nil {
- return Query{}, err
- }
- q.Offset = off
- }
- }
-
- return q, nil
-}
-
-func (p *parser) parseSelect() (Select, *parseError) {
- debugf("parseSelect: %v", p)
-
- /*
- select:
- SELECT [{ ALL | DISTINCT }]
- { [ expression. ]* | expression [ [ AS ] alias ] } [, ...]
- [ FROM from_item [ tablesample_type ] [, ...] ]
- [ WHERE bool_expression ]
- [ GROUP BY expression [, ...] ]
- [ HAVING bool_expression ]
- */
- if err := p.expect("SELECT"); err != nil {
- return Select{}, err
- }
-
- var sel Select
-
- if p.eat("ALL") {
- // Nothing to do; this is the default.
- } else if p.eat("DISTINCT") {
- sel.Distinct = true
- }
-
- // Read expressions for the SELECT list.
- list, aliases, err := p.parseSelectList()
- if err != nil {
- return Select{}, err
- }
- sel.List, sel.ListAliases = list, aliases
-
- if p.eat("FROM") {
- padTS := func() {
- for len(sel.TableSamples) < len(sel.From) {
- sel.TableSamples = append(sel.TableSamples, nil)
- }
- }
-
- for {
- from, err := p.parseSelectFrom()
- if err != nil {
- return Select{}, err
- }
- sel.From = append(sel.From, from)
-
- if p.sniff("TABLESAMPLE") {
- ts, err := p.parseTableSample()
- if err != nil {
- return Select{}, err
- }
- padTS()
- sel.TableSamples[len(sel.TableSamples)-1] = &ts
- }
-
- if p.eat(",") {
- continue
- }
- break
- }
-
- if sel.TableSamples != nil {
- padTS()
- }
- }
-
- if p.eat("WHERE") {
- where, err := p.parseBoolExpr()
- if err != nil {
- return Select{}, err
- }
- sel.Where = where
- }
-
- if p.eat("GROUP", "BY") {
- list, err := p.parseExprList()
- if err != nil {
- return Select{}, err
- }
- sel.GroupBy = list
- }
-
- // TODO: HAVING
-
- return sel, nil
-}
-
-func (p *parser) parseSelectList() ([]Expr, []ID, *parseError) {
- var list []Expr
- var aliases []ID // Only set if any aliases are seen.
- padAliases := func() {
- for len(aliases) < len(list) {
- aliases = append(aliases, "")
- }
- }
-
- for {
- expr, err := p.parseExpr()
- if err != nil {
- return nil, nil, err
- }
- list = append(list, expr)
-
- // TODO: The "AS" keyword is optional.
- if p.eat("AS") {
- alias, err := p.parseAlias()
- if err != nil {
- return nil, nil, err
- }
-
- padAliases()
- aliases[len(aliases)-1] = alias
- }
-
- if p.eat(",") {
- continue
- }
- break
- }
- if aliases != nil {
- padAliases()
- }
- return list, aliases, nil
-}
-
-func (p *parser) parseSelectFromTable() (SelectFrom, *parseError) {
- if p.eat("UNNEST") {
- if err := p.expect("("); err != nil {
- return nil, err
- }
- e, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- if err := p.expect(")"); err != nil {
- return nil, err
- }
- sfu := SelectFromUnnest{Expr: e}
- if p.eat("AS") { // TODO: The "AS" keyword is optional.
- alias, err := p.parseAlias()
- if err != nil {
- return nil, err
- }
- sfu.Alias = alias
- }
- // TODO: hint, offset
- return sfu, nil
- }
-
- // A join starts with a from_item, so that can't be detected in advance.
- // TODO: Support subquery, field_path, array_path, WITH.
- // TODO: Verify associativity of multile joins.
-
- tname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- sf := SelectFromTable{Table: tname}
- if p.eat("@") {
- hints, err := p.parseHints(map[string]string{})
- if err != nil {
- return nil, err
- }
- sf.Hints = hints
- }
-
- // TODO: The "AS" keyword is optional.
- if p.eat("AS") {
- alias, err := p.parseAlias()
- if err != nil {
- return nil, err
- }
- sf.Alias = alias
- }
- return sf, nil
-}
-
-func (p *parser) parseSelectFromJoin(lhs SelectFrom) (SelectFrom, *parseError) {
- // Look ahead to see if this is a join.
- tok := p.next()
- if tok.err != nil {
- p.back()
- return nil, nil
- }
- var hashJoin bool // Special case for "HASH JOIN" syntax.
- if tok.caseEqual("HASH") {
- hashJoin = true
- tok = p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- }
- var jt JoinType
- if tok.caseEqual("JOIN") {
- // This is implicitly an inner join.
- jt = InnerJoin
- } else if j, ok := joinKeywords[tok.value]; ok {
- jt = j
- switch jt {
- case FullJoin, LeftJoin, RightJoin:
- // These join types are implicitly "outer" joins,
- // so the "OUTER" keyword is optional.
- p.eat("OUTER")
- }
- if err := p.expect("JOIN"); err != nil {
- return nil, err
- }
- } else {
- // Not a join
- p.back()
- return nil, nil
- }
- sfj := SelectFromJoin{
- Type: jt,
- LHS: lhs,
- }
- var hints map[string]string
- if hashJoin {
- hints = map[string]string{}
- hints["JOIN_METHOD"] = "HASH_JOIN"
- }
-
- if p.eat("@") {
- h, err := p.parseHints(hints)
- if err != nil {
- return nil, err
- }
- hints = h
- }
- sfj.Hints = hints
-
- rhs, err := p.parseSelectFromTable()
- if err != nil {
- return nil, err
- }
-
- sfj.RHS = rhs
-
- if p.eat("ON") {
- sfj.On, err = p.parseBoolExpr()
- if err != nil {
- return nil, err
- }
- }
- if p.eat("USING") {
- if sfj.On != nil {
- return nil, p.errorf("join may not have both ON and USING clauses")
- }
- sfj.Using, err = p.parseColumnNameList()
- if err != nil {
- return nil, err
- }
- }
-
- return sfj, nil
-}
-
-func (p *parser) parseSelectFrom() (SelectFrom, *parseError) {
- debugf("parseSelectFrom: %v", p)
-
- /*
- from_item: {
- table_name [ table_hint_expr ] [ [ AS ] alias ] |
- join |
- ( query_expr ) [ table_hint_expr ] [ [ AS ] alias ] |
- field_path |
- { UNNEST( array_expression ) | UNNEST( array_path ) | array_path }
- [ table_hint_expr ] [ [ AS ] alias ] [ WITH OFFSET [ [ AS ] alias ] ] |
- with_query_name [ table_hint_expr ] [ [ AS ] alias ]
- }
-
- join:
- from_item [ join_type ] [ join_method ] JOIN [ join_hint_expr ] from_item
- [ ON bool_expression | USING ( join_column [, ...] ) ]
-
- join_type:
- { INNER | CROSS | FULL [OUTER] | LEFT [OUTER] | RIGHT [OUTER] }
- */
- leftHandSide, err := p.parseSelectFromTable()
- if err != nil {
- return nil, err
- }
- // Lets keep consuming joins until we no longer find more joins
- for {
- sfj, err := p.parseSelectFromJoin(leftHandSide)
- if err != nil {
- return nil, err
- }
- if sfj == nil {
- // There was no join to consume
- break
- }
- leftHandSide = sfj
- }
- return leftHandSide, nil
-}
-
-var joinKeywords = map[string]JoinType{
- "INNER": InnerJoin,
- "CROSS": CrossJoin,
- "FULL": FullJoin,
- "LEFT": LeftJoin,
- "RIGHT": RightJoin,
-}
-
-func (p *parser) parseTableSample() (TableSample, *parseError) {
- var ts TableSample
-
- if err := p.expect("TABLESAMPLE"); err != nil {
- return ts, err
- }
-
- tok := p.next()
- switch {
- case tok.err != nil:
- return ts, tok.err
- case tok.caseEqual("BERNOULLI"):
- ts.Method = Bernoulli
- case tok.caseEqual("RESERVOIR"):
- ts.Method = Reservoir
- default:
- return ts, p.errorf("got %q, want BERNOULLI or RESERVOIR", tok.value)
- }
-
- if err := p.expect("("); err != nil {
- return ts, err
- }
-
- // The docs say "numeric_value_expression" here,
- // but that doesn't appear to be defined anywhere.
- size, err := p.parseExpr()
- if err != nil {
- return ts, err
- }
- ts.Size = size
-
- tok = p.next()
- switch {
- case tok.err != nil:
- return ts, tok.err
- case tok.caseEqual("PERCENT"):
- ts.SizeType = PercentTableSample
- case tok.caseEqual("ROWS"):
- ts.SizeType = RowsTableSample
- default:
- return ts, p.errorf("got %q, want PERCENT or ROWS", tok.value)
- }
-
- if err := p.expect(")"); err != nil {
- return ts, err
- }
-
- return ts, nil
-}
-
-func (p *parser) parseOrder() (Order, *parseError) {
- /*
- expression [{ ASC | DESC }]
- */
-
- expr, err := p.parseExpr()
- if err != nil {
- return Order{}, err
- }
- o := Order{Expr: expr}
-
- if p.eat("ASC") {
- // OK.
- } else if p.eat("DESC") {
- o.Desc = true
- }
-
- return o, nil
-}
-
-func (p *parser) parseLiteralOrParam() (LiteralOrParam, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- if tok.typ == int64Token {
- n, err := strconv.ParseInt(tok.value, tok.int64Base, 64)
- if err != nil {
- return nil, p.errorf("%v", err)
- }
- return IntegerLiteral(n), nil
- }
- // TODO: check character sets.
- if strings.HasPrefix(tok.value, "@") {
- return Param(tok.value[1:]), nil
- }
- return nil, p.errorf("got %q, want literal or parameter", tok.value)
-}
-
-func (p *parser) parseExprList() ([]Expr, *parseError) {
- var list []Expr
- for {
- expr, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- list = append(list, expr)
-
- if p.eat(",") {
- continue
- }
- break
- }
- return list, nil
-}
-
-func (p *parser) parseParenExprList() ([]Expr, *parseError) {
- return p.parseParenExprListWithParseFunc(func(p *parser) (Expr, *parseError) {
- return p.parseExpr()
- })
-}
-
-func (p *parser) parseParenExprListWithParseFunc(f func(*parser) (Expr, *parseError)) ([]Expr, *parseError) {
- var list []Expr
- err := p.parseCommaList("(", ")", func(p *parser) *parseError {
- e, err := f(p)
- if err != nil {
- return err
- }
- list = append(list, e)
- return nil
- })
- return list, err
-}
-
-// Special argument parser for CAST and SAFE_CAST
-var typedArgParser = func(p *parser) (Expr, *parseError) {
- e, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- if err := p.expect("AS"); err != nil {
- return nil, err
- }
- // typename in cast function must not be parameterized types
- toType, err := p.parseBaseType()
- if err != nil {
- return nil, err
- }
- return TypedExpr{
- Expr: e,
- Type: toType,
- }, nil
-}
-
-// Special argument parser for EXTRACT
-var extractArgParser = func(p *parser) (Expr, *parseError) {
- partType, part, err := p.parseExtractType()
- if err != nil {
- return nil, err
- }
- if err := p.expect("FROM"); err != nil {
- return nil, err
- }
- e, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- // AT TIME ZONE is optional
- if p.eat("AT", "TIME", "ZONE") {
- tok := p.next()
- if tok.err != nil {
- return nil, err
- }
- return ExtractExpr{Part: part, Type: partType, Expr: AtTimeZoneExpr{Expr: e, Zone: tok.string, Type: Type{Base: Timestamp}}}, nil
- }
- return ExtractExpr{
- Part: part,
- Expr: e,
- Type: partType,
- }, nil
-}
-
-var intervalArgParser = func(parseDatePart func(*parser) (string, *parseError)) func(*parser) (Expr, *parseError) {
- return func(p *parser) (Expr, *parseError) {
- if p.eat("INTERVAL") {
- expr, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- datePart, err := parseDatePart(p)
- if err != nil {
- return nil, err
- }
- return IntervalExpr{Expr: expr, DatePart: datePart}, nil
- }
- return p.parseExpr()
- }
-}
-
-var dateIntervalDateParts map[string]bool = map[string]bool{
- "DAY": true,
- "WEEK": true,
- "MONTH": true,
- "QUARTER": true,
- "YEAR": true,
-}
-
-func (p *parser) parseDateIntervalDatePart() (string, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
- if dateIntervalDateParts[strings.ToUpper(tok.value)] {
- return strings.ToUpper(tok.value), nil
- }
- return "", p.errorf("got %q, want valid date part names", tok.value)
-}
-
-var timestampIntervalDateParts map[string]bool = map[string]bool{
- "NANOSECOND": true,
- "MICROSECOND": true,
- "MILLISECOND": true,
- "SECOND": true,
- "MINUTE": true,
- "HOUR": true,
- "DAY": true,
-}
-
-func (p *parser) parseTimestampIntervalDatePart() (string, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
- if timestampIntervalDateParts[strings.ToUpper(tok.value)] {
- return strings.ToUpper(tok.value), nil
- }
- return "", p.errorf("got %q, want valid date part names", tok.value)
-}
-
-// Special argument parser for DATE_ADD, DATE_SUB
-var dateIntervalArgParser = intervalArgParser((*parser).parseDateIntervalDatePart)
-
-// Special argument parser for TIMESTAMP_ADD, TIMESTAMP_SUB
-var timestampIntervalArgParser = intervalArgParser((*parser).parseTimestampIntervalDatePart)
-
-var sequenceArgParser = func(p *parser) (Expr, *parseError) {
- if p.eat("SEQUENCE") {
- name, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return nil, err
- }
- return SequenceExpr{Name: name}, nil
- }
- return p.parseExpr()
-}
-
-func (p *parser) parseAggregateFunc() (Func, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return Func{}, tok.err
- }
- name := strings.ToUpper(tok.value)
- if err := p.expect("("); err != nil {
- return Func{}, err
- }
- var distinct bool
- if p.eat("DISTINCT") {
- distinct = true
- }
- args, err := p.parseExprList()
- if err != nil {
- return Func{}, err
- }
- var nullsHandling NullsHandling
- if p.eat("IGNORE", "NULLS") {
- nullsHandling = IgnoreNulls
- } else if p.eat("RESPECT", "NULLS") {
- nullsHandling = RespectNulls
- }
- var having *AggregateHaving
- if p.eat("HAVING") {
- tok := p.next()
- if tok.err != nil {
- return Func{}, tok.err
- }
- var cond AggregateHavingCondition
- switch tok.value {
- case "MAX":
- cond = HavingMax
- case "MIN":
- cond = HavingMin
- default:
- return Func{}, p.errorf("got %q, want MAX or MIN", tok.value)
- }
- expr, err := p.parseExpr()
- if err != nil {
- return Func{}, err
- }
- having = &AggregateHaving{
- Condition: cond,
- Expr: expr,
- }
- }
- if err := p.expect(")"); err != nil {
- return Func{}, err
- }
- return Func{
- Name: name,
- Args: args,
- Distinct: distinct,
- NullsHandling: nullsHandling,
- Having: having,
- }, nil
-}
-
-/*
-Expressions
-
-Cloud Spanner expressions are not formally specified.
-The set of operators and their precedence is listed in
-https://cloud.google.com/spanner/docs/functions-and-operators#operators.
-
-parseExpr works as a classical recursive descent parser, splitting
-precedence levels into separate methods, where the call stack is in
-ascending order of precedence:
- parseExpr
- orParser
- andParser
- parseIsOp
- parseInOp
- parseComparisonOp
- parseArithOp: |, ^, &, << and >>, + and -, * and / and ||
- parseUnaryArithOp: - and ~
- parseLit
-*/
-
-func (p *parser) parseExpr() (Expr, *parseError) {
- debugf("parseExpr: %v", p)
-
- return orParser.parse(p)
-}
-
-// binOpParser is a generic meta-parser for binary operations.
-// It assumes the operation is left associative.
-type binOpParser struct {
- LHS, RHS func(*parser) (Expr, *parseError)
- Op string
- ArgCheck func(Expr) error
- Combiner func(lhs, rhs Expr) Expr
-}
-
-func (bin binOpParser) parse(p *parser) (Expr, *parseError) {
- expr, err := bin.LHS(p)
- if err != nil {
- return nil, err
- }
-
- for {
- if !p.eat(bin.Op) {
- break
- }
- rhs, err := bin.RHS(p)
- if err != nil {
- return nil, err
- }
- if bin.ArgCheck != nil {
- if err := bin.ArgCheck(expr); err != nil {
- return nil, p.errorf("%v", err)
- }
- if err := bin.ArgCheck(rhs); err != nil {
- return nil, p.errorf("%v", err)
- }
- }
- expr = bin.Combiner(expr, rhs)
- }
- return expr, nil
-}
-
-// Break initialisation loop.
-func init() { orParser = orParserShim }
-
-var (
- boolExprCheck = func(expr Expr) error {
- if _, ok := expr.(BoolExpr); !ok {
- return fmt.Errorf("got %T, want a boolean expression", expr)
- }
- return nil
- }
-
- orParser binOpParser
-
- orParserShim = binOpParser{
- LHS: andParser.parse,
- RHS: andParser.parse,
- Op: "OR",
- ArgCheck: boolExprCheck,
- Combiner: func(lhs, rhs Expr) Expr {
- return LogicalOp{LHS: lhs.(BoolExpr), Op: Or, RHS: rhs.(BoolExpr)}
- },
- }
- andParser = binOpParser{
- LHS: (*parser).parseLogicalNot,
- RHS: (*parser).parseLogicalNot,
- Op: "AND",
- ArgCheck: boolExprCheck,
- Combiner: func(lhs, rhs Expr) Expr {
- return LogicalOp{LHS: lhs.(BoolExpr), Op: And, RHS: rhs.(BoolExpr)}
- },
- }
-
- bitOrParser = newBinArithParser("|", BitOr, bitXorParser.parse)
- bitXorParser = newBinArithParser("^", BitXor, bitAndParser.parse)
- bitAndParser = newBinArithParser("&", BitAnd, bitShrParser.parse)
- bitShrParser = newBinArithParser(">>", BitShr, bitShlParser.parse)
- bitShlParser = newBinArithParser("<<", BitShl, subParser.parse)
- subParser = newBinArithParser("-", Sub, addParser.parse)
- addParser = newBinArithParser("+", Add, concatParser.parse)
- concatParser = newBinArithParser("||", Concat, divParser.parse)
- divParser = newBinArithParser("/", Div, mulParser.parse)
- mulParser = newBinArithParser("*", Mul, (*parser).parseUnaryArithOp)
-)
-
-func newBinArithParser(opStr string, op ArithOperator, nextPrec func(*parser) (Expr, *parseError)) binOpParser {
- return binOpParser{
- LHS: nextPrec,
- RHS: nextPrec,
- Op: opStr,
- // TODO: ArgCheck? numeric inputs only, except for ||.
- Combiner: func(lhs, rhs Expr) Expr {
- return ArithOp{LHS: lhs, Op: op, RHS: rhs}
- },
- }
-}
-
-func (p *parser) parseLogicalNot() (Expr, *parseError) {
- if !p.eat("NOT") {
- return p.parseIsOp()
- }
- be, err := p.parseBoolExpr()
- if err != nil {
- return nil, err
- }
- return LogicalOp{Op: Not, RHS: be}, nil
-}
-
-func (p *parser) parseIsOp() (Expr, *parseError) {
- debugf("parseIsOp: %v", p)
-
- expr, err := p.parseInOp()
- if err != nil {
- return nil, err
- }
-
- if !p.eat("IS") {
- return expr, nil
- }
-
- isOp := IsOp{LHS: expr}
- if p.eat("NOT") {
- isOp.Neg = true
- }
-
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch {
- case tok.caseEqual("NULL"):
- isOp.RHS = Null
- case tok.caseEqual("TRUE"):
- isOp.RHS = True
- case tok.caseEqual("FALSE"):
- isOp.RHS = False
- default:
- return nil, p.errorf("got %q, want NULL or TRUE or FALSE", tok.value)
- }
-
- return isOp, nil
-}
-
-func (p *parser) parseInOp() (Expr, *parseError) {
- debugf("parseInOp: %v", p)
-
- expr, err := p.parseComparisonOp()
- if err != nil {
- return nil, err
- }
-
- inOp := InOp{LHS: expr}
- if p.eat("NOT", "IN") {
- inOp.Neg = true
- } else if p.eat("IN") {
- // Okay.
- } else {
- return expr, nil
- }
-
- if p.eat("UNNEST") {
- inOp.Unnest = true
- }
-
- inOp.RHS, err = p.parseParenExprList()
- if err != nil {
- return nil, err
- }
- return inOp, nil
-}
-
-var symbolicOperators = map[string]ComparisonOperator{
- "<": Lt,
- "<=": Le,
- ">": Gt,
- ">=": Ge,
- "=": Eq,
- "!=": Ne,
- "<>": Ne,
-}
-
-func (p *parser) parseComparisonOp() (Expr, *parseError) {
- debugf("parseComparisonOp: %v", p)
-
- expr, err := p.parseArithOp()
- if err != nil {
- return nil, err
- }
-
- for {
- // There's a need for two token lookahead.
- var op ComparisonOperator
- var rhs2 bool
- if p.eat("NOT", "LIKE") {
- op = NotLike
- } else if p.eat("NOT", "BETWEEN") {
- op, rhs2 = NotBetween, true
- } else if p.eat("LIKE") {
- op = Like
- } else if p.eat("BETWEEN") {
- op, rhs2 = Between, true
- } else {
- // Check for a symbolic operator.
- tok := p.next()
- if tok.err != nil {
- p.back()
- break
- }
- var ok bool
- op, ok = symbolicOperators[tok.value]
- if !ok {
- p.back()
- break
- }
- }
-
- rhs, err := p.parseArithOp()
- if err != nil {
- return nil, err
- }
- co := ComparisonOp{LHS: expr, Op: op, RHS: rhs}
-
- if rhs2 {
- if err := p.expect("AND"); err != nil {
- return nil, err
- }
- rhs2, err := p.parseArithOp()
- if err != nil {
- return nil, err
- }
- co.RHS2 = rhs2
- }
-
- expr = co
- }
- return expr, nil
-}
-
-func (p *parser) parseArithOp() (Expr, *parseError) {
- return bitOrParser.parse(p)
-}
-
-var unaryArithOperators = map[string]ArithOperator{
- "-": Neg,
- "~": BitNot,
- "+": Plus,
-}
-
-func (p *parser) parseUnaryArithOp() (Expr, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
-
- op := tok.value
-
- if op == "-" || op == "+" {
- // If the next token is a numeric token, combine and parse as a literal.
- ntok := p.next()
- if ntok.err == nil {
- switch ntok.typ {
- case int64Token:
- comb := op + ntok.value
- n, err := strconv.ParseInt(comb, ntok.int64Base, 64)
- if err != nil {
- return nil, p.errorf("%v", err)
- }
- return IntegerLiteral(n), nil
- case float64Token:
- f := ntok.float64
- if op == "-" {
- f = -f
- }
- return FloatLiteral(f), nil
- }
- }
- // It is not possible for the p.back() lower down to fire
- // because - and + are in unaryArithOperators.
- p.back()
- }
-
- if op, ok := unaryArithOperators[op]; ok {
- e, err := p.parseLit()
- if err != nil {
- return nil, err
- }
- return ArithOp{Op: op, RHS: e}, nil
- }
- p.back()
-
- return p.parseLit()
-}
-
-func (p *parser) parseLit() (Expr, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
-
- switch tok.typ {
- case int64Token:
- n, err := strconv.ParseInt(tok.value, tok.int64Base, 64)
- if err != nil {
- return nil, p.errorf("%v", err)
- }
- return IntegerLiteral(n), nil
- case float64Token:
- return FloatLiteral(tok.float64), nil
- case stringToken:
- return StringLiteral(tok.string), nil
- case bytesToken:
- return BytesLiteral(tok.string), nil
- }
-
- // Handle parenthesized expressions.
- if tok.value == "(" {
- e, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- if err := p.expect(")"); err != nil {
- return nil, err
- }
- return Paren{Expr: e}, nil
- }
-
- // If the literal was an identifier, and there's an open paren next,
- // this is a function invocation.
- // The `funcs` map is keyed by upper case strings.
- if name := strings.ToUpper(tok.value); funcs[name] && p.sniff("(") {
- if aggregateFuncs[name] {
- p.back()
- return p.parseAggregateFunc()
- }
- var list []Expr
- var err *parseError
- if f, ok := funcArgParsers[name]; ok {
- list, err = p.parseParenExprListWithParseFunc(f)
- } else {
- list, err = p.parseParenExprList()
- }
- if err != nil {
- return nil, err
- }
- return Func{
- Name: name,
- Args: list,
- }, nil
- }
-
- // Handle some reserved keywords and special tokens that become specific values.
- switch {
- case tok.caseEqual("TRUE"):
- return True, nil
- case tok.caseEqual("FALSE"):
- return False, nil
- case tok.caseEqual("NULL"):
- return Null, nil
- case tok.value == "*":
- return Star, nil
- default:
- // TODO: Check IsKeyWord(tok.value), and return a good error?
- }
-
- // Handle conditional expressions.
- switch {
- case tok.caseEqual("CASE"):
- p.back()
- return p.parseCaseExpr()
- case tok.caseEqual("COALESCE"):
- p.back()
- return p.parseCoalesceExpr()
- case tok.caseEqual("IF"):
- p.back()
- return p.parseIfExpr()
- case tok.caseEqual("IFNULL"):
- p.back()
- return p.parseIfNullExpr()
- case tok.caseEqual("NULLIF"):
- p.back()
- return p.parseNullIfExpr()
- }
-
- // Handle typed literals.
- switch {
- case tok.caseEqual("ARRAY") || tok.value == "[":
- p.back()
- return p.parseArrayLit()
- case tok.caseEqual("DATE"):
- if p.sniffTokenType(stringToken) {
- p.back()
- return p.parseDateLit()
- }
- case tok.caseEqual("TIMESTAMP"):
- if p.sniffTokenType(stringToken) {
- p.back()
- return p.parseTimestampLit()
- }
- case tok.caseEqual("JSON"):
- if p.sniffTokenType(stringToken) {
- p.back()
- return p.parseJSONLit()
- }
- }
-
- // TODO: struct literals
-
- // Try a parameter.
- // TODO: check character sets.
- if strings.HasPrefix(tok.value, "@") {
- return Param(tok.value[1:]), nil
- }
-
- // Only thing left is a path expression or standalone identifier.
- p.back()
- pe, err := p.parsePathExp()
- if err != nil {
- return nil, err
- }
- if len(pe) == 1 {
- return pe[0], nil // identifier
- }
- return pe, nil
-}
-
-func (p *parser) parseCaseExpr() (Case, *parseError) {
- if err := p.expect("CASE"); err != nil {
- return Case{}, err
- }
-
- var expr Expr
- if !p.sniff("WHEN") {
- var err *parseError
- expr, err = p.parseExpr()
- if err != nil {
- return Case{}, err
- }
- }
-
- when, err := p.parseWhenClause()
- if err != nil {
- return Case{}, err
- }
- whens := []WhenClause{when}
- for p.sniff("WHEN") {
- when, err := p.parseWhenClause()
- if err != nil {
- return Case{}, err
- }
- whens = append(whens, when)
- }
-
- var elseResult Expr
- if p.sniff("ELSE") {
- p.eat("ELSE")
- var err *parseError
- elseResult, err = p.parseExpr()
- if err != nil {
- return Case{}, err
- }
- }
-
- if err := p.expect("END"); err != nil {
- return Case{}, err
- }
-
- return Case{
- Expr: expr,
- WhenClauses: whens,
- ElseResult: elseResult,
- }, nil
-}
-
-func (p *parser) parseWhenClause() (WhenClause, *parseError) {
- if err := p.expect("WHEN"); err != nil {
- return WhenClause{}, err
- }
- cond, err := p.parseExpr()
- if err != nil {
- return WhenClause{}, err
- }
- if err := p.expect("THEN"); err != nil {
- return WhenClause{}, err
- }
- result, err := p.parseExpr()
- if err != nil {
- return WhenClause{}, err
- }
- return WhenClause{Cond: cond, Result: result}, nil
-}
-
-func (p *parser) parseCoalesceExpr() (Coalesce, *parseError) {
- if err := p.expect("COALESCE"); err != nil {
- return Coalesce{}, err
- }
- exprList, err := p.parseParenExprList()
- if err != nil {
- return Coalesce{}, err
- }
- return Coalesce{ExprList: exprList}, nil
-}
-
-func (p *parser) parseIfExpr() (If, *parseError) {
- if err := p.expect("IF", "("); err != nil {
- return If{}, err
- }
-
- expr, err := p.parseBoolExpr()
- if err != nil {
- return If{}, err
- }
- if err := p.expect(","); err != nil {
- return If{}, err
- }
-
- trueResult, err := p.parseExpr()
- if err != nil {
- return If{}, err
- }
- if err := p.expect(","); err != nil {
- return If{}, err
- }
-
- elseResult, err := p.parseExpr()
- if err != nil {
- return If{}, err
- }
- if err := p.expect(")"); err != nil {
- return If{}, err
- }
-
- return If{Expr: expr, TrueResult: trueResult, ElseResult: elseResult}, nil
-}
-
-func (p *parser) parseIfNullExpr() (IfNull, *parseError) {
- if err := p.expect("IFNULL", "("); err != nil {
- return IfNull{}, err
- }
-
- expr, err := p.parseExpr()
- if err != nil {
- return IfNull{}, err
- }
- if err := p.expect(","); err != nil {
- return IfNull{}, err
- }
-
- nullResult, err := p.parseExpr()
- if err != nil {
- return IfNull{}, err
- }
- if err := p.expect(")"); err != nil {
- return IfNull{}, err
- }
-
- return IfNull{Expr: expr, NullResult: nullResult}, nil
-}
-
-func (p *parser) parseNullIfExpr() (NullIf, *parseError) {
- if err := p.expect("NULLIF", "("); err != nil {
- return NullIf{}, err
- }
-
- expr, err := p.parseExpr()
- if err != nil {
- return NullIf{}, err
- }
- if err := p.expect(","); err != nil {
- return NullIf{}, err
- }
-
- exprToMatch, err := p.parseExpr()
- if err != nil {
- return NullIf{}, err
- }
- if err := p.expect(")"); err != nil {
- return NullIf{}, err
- }
-
- return NullIf{Expr: expr, ExprToMatch: exprToMatch}, nil
-}
-
-func (p *parser) parseArrayLit() (Array, *parseError) {
- // ARRAY keyword is optional.
- // TODO: If it is present, consume any <T> after it.
- p.eat("ARRAY")
-
- var arr Array
- err := p.parseCommaList("[", "]", func(p *parser) *parseError {
- e, err := p.parseLit()
- if err != nil {
- return err
- }
- // TODO: Do type consistency checking here?
- arr = append(arr, e)
- return nil
- })
- return arr, err
-}
-
-// TODO: There should be exported Parse{Date,Timestamp}Literal package-level funcs
-// to support spannertest coercing plain string literals when used in a typed context.
-// Those should wrap parseDateLit and parseTimestampLit below.
-
-func (p *parser) parseDateLit() (DateLiteral, *parseError) {
- if err := p.expect("DATE"); err != nil {
- return DateLiteral{}, err
- }
- s, err := p.parseStringLit()
- if err != nil {
- return DateLiteral{}, err
- }
- d, perr := civil.ParseDate(string(s))
- if perr != nil {
- return DateLiteral{}, p.errorf("bad date literal %q: %v", s, perr)
- }
- // TODO: Enforce valid range.
- return DateLiteral(d), nil
-}
-
-// TODO: A manual parser is probably better than this.
-// There are a lot of variations that this does not handle.
-var timestampFormats = []string{
- // 'YYYY-[M]M-[D]D [[H]H:[M]M:[S]S[.DDDDDD] [timezone]]'
- "2006-01-02",
- "2006-01-02 15:04:05",
- "2006-01-02 15:04:05.000000",
- "2006-01-02 15:04:05-07:00",
- "2006-01-02 15:04:05.000000-07:00",
-}
-
-var defaultLocation = func() *time.Location {
- // The docs say "America/Los_Angeles" is the default.
- // Use that if we can load it, but fall back on UTC if we don't have timezone data.
- loc, err := time.LoadLocation("America/Los_Angeles")
- if err == nil {
- return loc
- }
- return time.UTC
-}()
-
-func (p *parser) parseTimestampLit() (TimestampLiteral, *parseError) {
- if err := p.expect("TIMESTAMP"); err != nil {
- return TimestampLiteral{}, err
- }
- s, err := p.parseStringLit()
- if err != nil {
- return TimestampLiteral{}, err
- }
- for _, format := range timestampFormats {
- t, err := time.ParseInLocation(format, string(s), defaultLocation)
- if err == nil {
- // TODO: Enforce valid range.
- return TimestampLiteral(t), nil
- }
- }
- return TimestampLiteral{}, p.errorf("invalid timestamp literal %q", s)
-}
-
-func (p *parser) parseJSONLit() (JSONLiteral, *parseError) {
- if err := p.expect("JSON"); err != nil {
- return JSONLiteral{}, err
- }
- s, err := p.parseStringLit()
- if err != nil {
- return JSONLiteral{}, err
- }
- // It is not guaranteed that the returned JSONLiteral is a valid JSON document
- // to avoid error due to parsing SQL generated with an invalid JSONLiteral like JSONLiteral("")
- return JSONLiteral(s), nil
-}
-
-func (p *parser) parseStringLit() (StringLiteral, *parseError) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
- if tok.typ != stringToken {
- return "", p.errorf("got %q, want string literal", tok.value)
- }
- return StringLiteral(tok.string), nil
-}
-
-func (p *parser) parsePathExp() (PathExp, *parseError) {
- var pe PathExp
- for {
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- switch tok.typ {
- case quotedID:
- pe = append(pe, ID(tok.string))
- case unquotedID:
- pe = append(pe, ID(tok.value))
- default:
- // TODO: Is this correct?
- return nil, p.errorf("expected identifer")
- }
- if !p.eat(".") {
- break
- }
- }
- return pe, nil
-}
-
-func (p *parser) parseBoolExpr() (BoolExpr, *parseError) {
- expr, err := p.parseExpr()
- if err != nil {
- return nil, err
- }
- be, ok := expr.(BoolExpr)
- if !ok {
- return nil, p.errorf("got non-bool expression %T", expr)
- }
- return be, nil
-}
-
-func (p *parser) parseAlias() (ID, *parseError) {
- // The docs don't specify what lexical token is valid for an alias,
- // but it seems likely that it is an identifier.
- return p.parseTableOrIndexOrColumnName()
-}
-
-func (p *parser) parseHints(hints map[string]string) (map[string]string, *parseError) {
- if hints == nil {
- hints = map[string]string{}
- }
- if err := p.expect("{"); err != nil {
- return nil, err
- }
- for {
- if p.sniff("}") {
- break
- }
- tok := p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- k := tok.value
- if err := p.expect("="); err != nil {
- return nil, err
- }
- tok = p.next()
- if tok.err != nil {
- return nil, tok.err
- }
- v := tok.value
- hints[k] = v
- if !p.eat(",") {
- break
- }
- }
- if err := p.expect("}"); err != nil {
- return nil, err
- }
- return hints, nil
-}
-
-func (p *parser) parseTableOrIndexOrColumnName() (ID, *parseError) {
- /*
- table_name and column_name and index_name and role_name:
- {a—z|A—Z}[{a—z|A—Z|0—9|_}+]
- */
-
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
- switch tok.typ {
- case quotedID:
- return ID(tok.string), nil
- case unquotedID:
- // TODO: enforce restrictions
- return ID(tok.value), nil
- default:
- return "", p.errorf("expected identifier")
- }
-}
-
-func (p *parser) parseOnDelete() (OnDelete, *parseError) {
- /*
- CASCADE
- NO ACTION
- */
-
- tok := p.next()
- if tok.err != nil {
- return 0, tok.err
- }
- if tok.caseEqual("CASCADE") {
- return CascadeOnDelete, nil
- }
- if !tok.caseEqual("NO") {
- return 0, p.errorf("got %q, want NO or CASCADE", tok.value)
- }
- if err := p.expect("ACTION"); err != nil {
- return 0, err
- }
- return NoActionOnDelete, nil
-}
-
-func (p *parser) parseRowDeletionPolicy() (RowDeletionPolicy, *parseError) {
- if err := p.expect("(", "OLDER_THAN", "("); err != nil {
- return RowDeletionPolicy{}, err
- }
- cname, err := p.parseTableOrIndexOrColumnName()
- if err != nil {
- return RowDeletionPolicy{}, err
- }
- if err := p.expect(",", "INTERVAL"); err != nil {
- return RowDeletionPolicy{}, err
- }
- tok := p.next()
- if tok.err != nil {
- return RowDeletionPolicy{}, tok.err
- }
- if tok.typ != int64Token {
- return RowDeletionPolicy{}, p.errorf("got %q, expected int64 token", tok.value)
- }
- n, serr := strconv.ParseInt(tok.value, tok.int64Base, 64)
- if serr != nil {
- return RowDeletionPolicy{}, p.errorf("%v", serr)
- }
- if err := p.expect("DAY", ")", ")"); err != nil {
- return RowDeletionPolicy{}, err
- }
- return RowDeletionPolicy{
- Column: cname,
- NumDays: n,
- }, nil
-}
-
-// parseCommaList parses a comma-separated list enclosed by bra and ket,
-// delegating to f for the individual element parsing.
-// Only invoke this with symbols as bra/ket; they are matched literally, not case insensitively.
-func (p *parser) parseCommaList(bra, ket string, f func(*parser) *parseError) *parseError {
- if err := p.expect(bra); err != nil {
- return err
- }
- for {
- if p.eat(ket) {
- return nil
- }
-
- err := f(p)
- if err != nil {
- return err
- }
-
- // ket or "," should be next.
- tok := p.next()
- if tok.err != nil {
- return err
- }
- if tok.value == ket {
- return nil
- } else if tok.value == "," {
- continue
- } else {
- return p.errorf(`got %q, want %q or ","`, tok.value, ket)
- }
- }
-}
-
-// parseCommaListWithEnds parses a comma-separated list to expected ends,
-// delegating to f for the individual element parsing.
-// Only invoke this with symbols as end; they are matched case insensitively.
-func (p *parser) parseCommaListWithEnds(f func(*parser) *parseError, end ...string) *parseError {
- if p.eat(end...) {
- return nil
- }
- for {
- err := f(p)
- if err != nil {
- return err
- }
- if p.eat(end...) {
- return nil
- }
-
- tok := p.next()
- if tok.err != nil {
- return err
- }
- if tok.value == "," {
- continue
- } else if tok.value == ";" {
- return nil
- }
- }
-}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/sql.go b/vendor/cloud.google.com/go/spanner/spansql/sql.go
deleted file mode 100644
index 577a45e2e..000000000
--- a/vendor/cloud.google.com/go/spanner/spansql/sql.go
+++ /dev/null
@@ -1,1183 +0,0 @@
-/*
-Copyright 2019 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spansql
-
-// This file holds SQL methods for rendering the types in types.go
-// as the SQL dialect that this package parses.
-//
-// Every exported type has an SQL method that returns a string.
-// Some also have an addSQL method that efficiently builds that string
-// in a provided strings.Builder.
-
-import (
- "fmt"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-func buildSQL(x interface{ addSQL(*strings.Builder) }) string {
- var sb strings.Builder
- x.addSQL(&sb)
- return sb.String()
-}
-
-func (ct CreateTable) SQL() string {
- str := "CREATE TABLE "
- if ct.IfNotExists {
- str += "IF NOT EXISTS "
- }
- str += ct.Name.SQL() + " (\n"
- for _, c := range ct.Columns {
- str += " " + c.SQL() + ",\n"
- }
- for _, tc := range ct.Constraints {
- str += " " + tc.SQL() + ",\n"
- }
- if len(ct.Synonym) > 0 {
- str += " SYNONYM(" + ct.Synonym.SQL() + "),\n"
- }
- str += ") PRIMARY KEY("
- for i, c := range ct.PrimaryKey {
- if i > 0 {
- str += ", "
- }
- str += c.SQL()
- }
- str += ")"
- if il := ct.Interleave; il != nil {
- str += ",\n INTERLEAVE IN PARENT " + il.Parent.SQL() + " ON DELETE " + il.OnDelete.SQL()
- }
- if rdp := ct.RowDeletionPolicy; rdp != nil {
- str += ",\n " + rdp.SQL()
- }
- return str
-}
-
-func (ci CreateIndex) SQL() string {
- str := "CREATE"
- if ci.Unique {
- str += " UNIQUE"
- }
- if ci.NullFiltered {
- str += " NULL_FILTERED"
- }
- str += " INDEX "
- if ci.IfNotExists {
- str += "IF NOT EXISTS "
- }
- str += ci.Name.SQL() + " ON " + ci.Table.SQL() + "("
- for i, c := range ci.Columns {
- if i > 0 {
- str += ", "
- }
- str += c.SQL()
- }
- str += ")"
- if len(ci.Storing) > 0 {
- str += " STORING (" + idList(ci.Storing, ", ") + ")"
- }
- if ci.Interleave != "" {
- str += ", INTERLEAVE IN " + ci.Interleave.SQL()
- }
- return str
-}
-
-func (cv CreateView) SQL() string {
- str := "CREATE"
- if cv.OrReplace {
- str += " OR REPLACE"
- }
- str += " VIEW " + cv.Name.SQL() + " SQL SECURITY " + cv.SecurityType.SQL() + " AS " + cv.Query.SQL()
- return str
-}
-
-func (st SecurityType) SQL() string {
- switch st {
- case Invoker:
- return "INVOKER"
- case Definer:
- return "DEFINER"
- }
- panic("unknown SecurityType")
-}
-
-func (cr CreateRole) SQL() string {
- return "CREATE ROLE " + cr.Name.SQL()
-}
-
-func (cs CreateChangeStream) SQL() string {
- str := "CREATE CHANGE STREAM "
- str += cs.Name.SQL()
- if cs.WatchAllTables {
- str += " FOR ALL"
- } else {
- for i, table := range cs.Watch {
- if i == 0 {
- str += " FOR "
- } else {
- str += ", "
- }
- str += table.SQL()
- }
- }
- if cs.Options != (ChangeStreamOptions{}) {
- str += " " + cs.Options.SQL()
- }
-
- return str
-}
-
-func (w WatchDef) SQL() string {
- str := w.Table.SQL()
- if !w.WatchAllCols {
- str += "("
- for i, c := range w.Columns {
- if i > 0 {
- str += ", "
- }
- str += c.SQL()
- }
- str += ")"
- }
- return str
-}
-
-func (dt DropTable) SQL() string {
- str := "DROP TABLE "
- if dt.IfExists {
- str += "IF EXISTS "
- }
- str += dt.Name.SQL()
- return str
-}
-
-func (di DropIndex) SQL() string {
- str := "DROP INDEX "
- if di.IfExists {
- str += "IF EXISTS "
- }
- str += di.Name.SQL()
- return str
-}
-
-func (dv DropView) SQL() string {
- return "DROP VIEW " + dv.Name.SQL()
-}
-
-func (dr DropRole) SQL() string {
- return "DROP ROLE " + dr.Name.SQL()
-}
-
-func (gr GrantRole) SQL() string {
- sql := "GRANT "
- if gr.Privileges != nil {
- for i, priv := range gr.Privileges {
- if i > 0 {
- sql += ", "
- }
- sql += priv.Type.SQL()
- if priv.Columns != nil {
- sql += "(" + idList(priv.Columns, ", ") + ")"
- }
- }
- sql += " ON TABLE " + idList(gr.TableNames, ", ")
- } else if len(gr.TvfNames) > 0 {
- sql += "EXECUTE ON TABLE FUNCTION " + idList(gr.TvfNames, ", ")
- } else if len(gr.ViewNames) > 0 {
- sql += "SELECT ON VIEW " + idList(gr.ViewNames, ", ")
- } else if len(gr.ChangeStreamNames) > 0 {
- sql += "SELECT ON CHANGE STREAM " + idList(gr.ChangeStreamNames, ", ")
- } else {
- sql += "ROLE " + idList(gr.GrantRoleNames, ", ")
- }
- sql += " TO ROLE " + idList(gr.ToRoleNames, ", ")
- return sql
-}
-
-func (rr RevokeRole) SQL() string {
- sql := "REVOKE "
- if rr.Privileges != nil {
- for i, priv := range rr.Privileges {
- if i > 0 {
- sql += ", "
- }
- sql += priv.Type.SQL()
- if priv.Columns != nil {
- sql += "(" + idList(priv.Columns, ", ") + ")"
- }
- }
- sql += " ON TABLE " + idList(rr.TableNames, ", ")
- } else if len(rr.TvfNames) > 0 {
- sql += "EXECUTE ON TABLE FUNCTION " + idList(rr.TvfNames, ", ")
- } else if len(rr.ViewNames) > 0 {
- sql += "SELECT ON VIEW " + idList(rr.ViewNames, ", ")
- } else if len(rr.ChangeStreamNames) > 0 {
- sql += "SELECT ON CHANGE STREAM " + idList(rr.ChangeStreamNames, ", ")
- } else {
- sql += "ROLE " + idList(rr.RevokeRoleNames, ", ")
- }
- sql += " FROM ROLE " + idList(rr.FromRoleNames, ", ")
- return sql
-}
-
-func (dc DropChangeStream) SQL() string {
- return "DROP CHANGE STREAM " + dc.Name.SQL()
-}
-
-func (acs AlterChangeStream) SQL() string {
- return "ALTER CHANGE STREAM " + acs.Name.SQL() + " " + acs.Alteration.SQL()
-}
-
-func (scsw AlterWatch) SQL() string {
- str := "SET FOR "
- if scsw.WatchAllTables {
- return str + "ALL"
- }
- for i, table := range scsw.Watch {
- if i > 0 {
- str += ", "
- }
- str += table.SQL()
- }
- return str
-}
-
-func (ao AlterChangeStreamOptions) SQL() string {
- return "SET " + ao.Options.SQL()
-}
-
-func (dcsw DropChangeStreamWatch) SQL() string {
- return "DROP FOR ALL"
-}
-
-func (cso ChangeStreamOptions) SQL() string {
- str := "OPTIONS ("
- hasOpt := false
- if cso.RetentionPeriod != nil {
- hasOpt = true
- str += fmt.Sprintf("retention_period='%s'", *cso.RetentionPeriod)
- }
- if cso.ValueCaptureType != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- str += fmt.Sprintf("value_capture_type='%s'", *cso.ValueCaptureType)
- }
- str += ")"
- return str
-}
-
-func (at AlterTable) SQL() string {
- return "ALTER TABLE " + at.Name.SQL() + " " + at.Alteration.SQL()
-}
-
-func (ac AddColumn) SQL() string {
- str := "ADD COLUMN "
- if ac.IfNotExists {
- str += "IF NOT EXISTS "
- }
- str += ac.Def.SQL()
- return str
-}
-
-func (dc DropColumn) SQL() string {
- return "DROP COLUMN " + dc.Name.SQL()
-}
-
-func (ac AddConstraint) SQL() string {
- return "ADD " + ac.Constraint.SQL()
-}
-
-func (dc DropConstraint) SQL() string {
- return "DROP CONSTRAINT " + dc.Name.SQL()
-}
-
-func (rt RenameTo) SQL() string {
- str := "RENAME TO " + rt.ToName.SQL()
- if len(rt.Synonym) > 0 {
- str += ", ADD SYNONYM " + rt.Synonym.SQL()
- }
- return str
-}
-
-func (as AddSynonym) SQL() string {
- return "ADD SYNONYM " + as.Name.SQL()
-}
-
-func (ds DropSynonym) SQL() string {
- return "DROP SYNONYM " + ds.Name.SQL()
-}
-
-func (sod SetOnDelete) SQL() string {
- return "SET ON DELETE " + sod.Action.SQL()
-}
-
-func (od OnDelete) SQL() string {
- switch od {
- case NoActionOnDelete:
- return "NO ACTION"
- case CascadeOnDelete:
- return "CASCADE"
- }
- panic("unknown OnDelete")
-}
-
-func (ac AlterColumn) SQL() string {
- return "ALTER COLUMN " + ac.Name.SQL() + " " + ac.Alteration.SQL()
-}
-
-func (ardp AddRowDeletionPolicy) SQL() string {
- return "ADD " + ardp.RowDeletionPolicy.SQL()
-}
-
-func (rrdp ReplaceRowDeletionPolicy) SQL() string {
- return "REPLACE " + rrdp.RowDeletionPolicy.SQL()
-}
-
-func (drdp DropRowDeletionPolicy) SQL() string {
- return "DROP ROW DELETION POLICY"
-}
-
-func (sct SetColumnType) SQL() string {
- str := sct.Type.SQL()
- if sct.NotNull {
- str += " NOT NULL"
- }
- if sct.Default != nil {
- str += " DEFAULT (" + sct.Default.SQL() + ")"
- }
- return str
-}
-
-func (sco SetColumnOptions) SQL() string {
- // TODO: not clear what to do for no options.
- return "SET " + sco.Options.SQL()
-}
-
-func (sd SetDefault) SQL() string {
- return "SET DEFAULT (" + sd.Default.SQL() + ")"
-}
-
-func (dp DropDefault) SQL() string {
- return "DROP DEFAULT"
-}
-
-func (co ColumnOptions) SQL() string {
- str := "OPTIONS ("
- if co.AllowCommitTimestamp != nil {
- if *co.AllowCommitTimestamp {
- str += "allow_commit_timestamp = true"
- } else {
- str += "allow_commit_timestamp = null"
- }
- }
- str += ")"
- return str
-}
-
-func (rt RenameTable) SQL() string {
- str := "RENAME TABLE "
- for i, op := range rt.TableRenameOps {
- if i > 0 {
- str += ", "
- }
- str += op.FromName.SQL() + " TO " + op.ToName.SQL()
- }
- return str
-}
-
-func (ad AlterDatabase) SQL() string {
- return "ALTER DATABASE " + ad.Name.SQL() + " " + ad.Alteration.SQL()
-}
-
-func (sdo SetDatabaseOptions) SQL() string {
- return "SET " + sdo.Options.SQL()
-}
-
-func (do DatabaseOptions) SQL() string {
- str := "OPTIONS ("
- hasOpt := false
- if do.OptimizerVersion != nil {
- hasOpt = true
- if *do.OptimizerVersion == 0 {
- str += "optimizer_version=null"
- } else {
- str += fmt.Sprintf("optimizer_version=%v", *do.OptimizerVersion)
- }
- }
- if do.OptimizerStatisticsPackage != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- if *do.OptimizerStatisticsPackage == "" {
- str += "optimizer_statistics_package=null"
- } else {
- str += fmt.Sprintf("optimizer_statistics_package='%s'", *do.OptimizerStatisticsPackage)
- }
- }
- if do.VersionRetentionPeriod != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- if *do.VersionRetentionPeriod == "" {
- str += "version_retention_period=null"
- } else {
- str += fmt.Sprintf("version_retention_period='%s'", *do.VersionRetentionPeriod)
- }
- }
- if do.EnableKeyVisualizer != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- if *do.EnableKeyVisualizer {
- str += "enable_key_visualizer=true"
- } else {
- str += "enable_key_visualizer=null"
- }
- }
- if do.DefaultLeader != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- if *do.DefaultLeader == "" {
- str += "default_leader=null"
- } else {
- str += fmt.Sprintf("default_leader='%s'", *do.DefaultLeader)
- }
- }
- str += ")"
- return str
-}
-
-func (as AlterStatistics) SQL() string {
- return "ALTER STATISTICS " + as.Name.SQL() + " " + as.Alteration.SQL()
-}
-
-func (sso SetStatisticsOptions) SQL() string {
- return "SET " + sso.Options.SQL()
-}
-
-func (sa StatisticsOptions) SQL() string {
- str := "OPTIONS ("
- if sa.AllowGC != nil {
- str += fmt.Sprintf("allow_gc=%v", *sa.AllowGC)
- }
- str += ")"
- return str
-}
-
-func (ai AlterIndex) SQL() string {
- return "ALTER INDEX " + ai.Name.SQL() + " " + ai.Alteration.SQL()
-}
-
-func (asc AddStoredColumn) SQL() string {
- return "ADD STORED COLUMN " + asc.Name.SQL()
-}
-
-func (dsc DropStoredColumn) SQL() string {
- return "DROP STORED COLUMN " + dsc.Name.SQL()
-}
-
-func (cs CreateSequence) SQL() string {
- str := "CREATE SEQUENCE "
- if cs.IfNotExists {
- str += "IF NOT EXISTS "
- }
- return str + cs.Name.SQL() + " " + cs.Options.SQL()
-}
-
-func (as AlterSequence) SQL() string {
- return "ALTER SEQUENCE " + as.Name.SQL() + " " + as.Alteration.SQL()
-}
-
-func (sa SetSequenceOptions) SQL() string {
- return "SET " + sa.Options.SQL()
-}
-
-func (so SequenceOptions) SQL() string {
- str := "OPTIONS ("
- hasOpt := false
- if so.SequenceKind != nil {
- hasOpt = true
- str += fmt.Sprintf("sequence_kind='%s'", *so.SequenceKind)
- }
- if so.SkipRangeMin != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- str += fmt.Sprintf("skip_range_min=%v", *so.SkipRangeMin)
- }
- if so.SkipRangeMax != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- str += fmt.Sprintf("skip_range_max=%v", *so.SkipRangeMax)
- }
- if so.StartWithCounter != nil {
- if hasOpt {
- str += ", "
- }
- hasOpt = true
- str += fmt.Sprintf("start_with_counter=%v", *so.StartWithCounter)
- }
- return str + ")"
-}
-
-func (do DropSequence) SQL() string {
- str := "DROP SEQUENCE "
- if do.IfExists {
- str += "IF EXISTS "
- }
- return str + do.Name.SQL()
-}
-
-func (d *Delete) SQL() string {
- return "DELETE FROM " + d.Table.SQL() + " WHERE " + d.Where.SQL()
-}
-
-func (u *Update) SQL() string {
- str := "UPDATE " + u.Table.SQL() + " SET "
- for i, item := range u.Items {
- if i > 0 {
- str += ", "
- }
- str += item.Column.SQL() + " = "
- if item.Value != nil {
- str += item.Value.SQL()
- } else {
- str += "DEFAULT"
- }
- }
- str += " WHERE " + u.Where.SQL()
- return str
-}
-
-func (i *Insert) SQL() string {
- str := "INSERT INTO " + i.Table.SQL() + " ("
- for i, column := range i.Columns {
- if i > 0 {
- str += ", "
- }
- str += column.SQL()
- }
- str += ") "
- str += i.Input.SQL()
- return str
-}
-
-func (v Values) SQL() string {
- str := "VALUES "
- for j, values := range v {
- if j > 0 {
- str += ", "
- }
- str += "("
-
- for k, value := range values {
- if k > 0 {
- str += ", "
- }
- str += value.SQL()
- }
- str += ")"
- }
- return str
-}
-
-func (cd ColumnDef) SQL() string {
- str := cd.Name.SQL() + " " + cd.Type.SQL()
- if cd.NotNull {
- str += " NOT NULL"
- }
- if cd.Default != nil {
- str += " DEFAULT (" + cd.Default.SQL() + ")"
- }
- if cd.Generated != nil {
- str += " AS (" + cd.Generated.SQL() + ") STORED"
- }
- if cd.Options != (ColumnOptions{}) {
- str += " " + cd.Options.SQL()
- }
- return str
-}
-
-func (tc TableConstraint) SQL() string {
- var str string
- if tc.Name != "" {
- str += "CONSTRAINT " + tc.Name.SQL() + " "
- }
- str += tc.Constraint.SQL()
- return str
-}
-
-func (rdp RowDeletionPolicy) SQL() string {
- return "ROW DELETION POLICY ( OLDER_THAN ( " + rdp.Column.SQL() + ", INTERVAL " + strconv.FormatInt(rdp.NumDays, 10) + " DAY ))"
-}
-
-func (fk ForeignKey) SQL() string {
- str := "FOREIGN KEY (" + idList(fk.Columns, ", ")
- str += ") REFERENCES " + fk.RefTable.SQL() + " ("
- str += idList(fk.RefColumns, ", ") + ")"
- str += " ON DELETE " + fk.OnDelete.SQL()
- return str
-}
-
-func (c Check) SQL() string {
- return "CHECK (" + c.Expr.SQL() + ")"
-}
-
-func (t Type) SQL() string {
- str := t.Base.SQL()
- if t.Len > 0 && (t.Base == String || t.Base == Bytes) {
- str += "("
- if t.Len == MaxLen {
- str += "MAX"
- } else {
- str += strconv.FormatInt(t.Len, 10)
- }
- str += ")"
- }
- if t.Array {
- str = "ARRAY<" + str + ">"
- }
- return str
-}
-
-func (tb TypeBase) SQL() string {
- switch tb {
- case Bool:
- return "BOOL"
- case Int64:
- return "INT64"
- case Float64:
- return "FLOAT64"
- case Numeric:
- return "NUMERIC"
- case String:
- return "STRING"
- case Bytes:
- return "BYTES"
- case Date:
- return "DATE"
- case Timestamp:
- return "TIMESTAMP"
- case JSON:
- return "JSON"
- }
- panic("unknown TypeBase")
-}
-
-func (pt PrivilegeType) SQL() string {
- switch pt {
- case PrivilegeTypeSelect:
- return "SELECT"
- case PrivilegeTypeInsert:
- return "INSERT"
- case PrivilegeTypeUpdate:
- return "UPDATE"
- case PrivilegeTypeDelete:
- return "DELETE"
- }
- panic("unknown PrivilegeType")
-}
-func (kp KeyPart) SQL() string {
- str := kp.Column.SQL()
- if kp.Desc {
- str += " DESC"
- }
- return str
-}
-
-func (q Query) SQL() string { return buildSQL(q) }
-func (q Query) addSQL(sb *strings.Builder) {
- q.Select.addSQL(sb)
- if len(q.Order) > 0 {
- sb.WriteString(" ORDER BY ")
- for i, o := range q.Order {
- if i > 0 {
- sb.WriteString(", ")
- }
- o.addSQL(sb)
- }
- }
- if q.Limit != nil {
- sb.WriteString(" LIMIT ")
- sb.WriteString(q.Limit.SQL())
- if q.Offset != nil {
- sb.WriteString(" OFFSET ")
- sb.WriteString(q.Offset.SQL())
- }
- }
-}
-
-func (sel Select) SQL() string { return buildSQL(sel) }
-func (sel Select) addSQL(sb *strings.Builder) {
- sb.WriteString("SELECT ")
- if sel.Distinct {
- sb.WriteString("DISTINCT ")
- }
- for i, e := range sel.List {
- if i > 0 {
- sb.WriteString(", ")
- }
- e.addSQL(sb)
- if len(sel.ListAliases) > 0 {
- alias := sel.ListAliases[i]
- if alias != "" {
- sb.WriteString(" AS ")
- sb.WriteString(alias.SQL())
- }
- }
- }
- if len(sel.From) > 0 {
- sb.WriteString(" FROM ")
- for i, f := range sel.From {
- if i > 0 {
- sb.WriteString(", ")
- }
- sb.WriteString(f.SQL())
- }
- }
- if sel.Where != nil {
- sb.WriteString(" WHERE ")
- sel.Where.addSQL(sb)
- }
- if len(sel.GroupBy) > 0 {
- sb.WriteString(" GROUP BY ")
- addExprList(sb, sel.GroupBy, ", ")
- }
-}
-
-func (sft SelectFromTable) SQL() string {
- str := sft.Table.SQL()
- if len(sft.Hints) > 0 {
- str += "@{"
- kvs := make([]string, len(sft.Hints))
- i := 0
- for k, v := range sft.Hints {
- kvs[i] = fmt.Sprintf("%s=%s", k, v)
- i++
- }
- sort.Strings(kvs)
- str += strings.Join(kvs, ",")
- str += "}"
- }
-
- if sft.Alias != "" {
- str += " AS " + sft.Alias.SQL()
- }
- return str
-}
-
-func (sfj SelectFromJoin) SQL() string {
- // TODO: The grammar permits arbitrary nesting. Does this need to add parens?
- str := sfj.LHS.SQL() + " " + joinTypes[sfj.Type] + " JOIN "
- // TODO: hints go here
- str += sfj.RHS.SQL()
- if sfj.On != nil {
- str += " ON " + sfj.On.SQL()
- } else if len(sfj.Using) > 0 {
- str += " USING (" + idList(sfj.Using, ", ") + ")"
- }
- return str
-}
-
-var joinTypes = map[JoinType]string{
- InnerJoin: "INNER",
- CrossJoin: "CROSS",
- FullJoin: "FULL",
- LeftJoin: "LEFT",
- RightJoin: "RIGHT",
-}
-
-func (sfu SelectFromUnnest) SQL() string {
- str := "UNNEST(" + sfu.Expr.SQL() + ")"
- if sfu.Alias != "" {
- str += " AS " + sfu.Alias.SQL()
- }
- return str
-}
-
-func (o Order) SQL() string { return buildSQL(o) }
-func (o Order) addSQL(sb *strings.Builder) {
- o.Expr.addSQL(sb)
- if o.Desc {
- sb.WriteString(" DESC")
- }
-}
-
-var arithOps = map[ArithOperator]string{
- // Binary operators only; unary operators are handled first.
- Mul: "*",
- Div: "/",
- Concat: "||",
- Add: "+",
- Sub: "-",
- BitShl: "<<",
- BitShr: ">>",
- BitAnd: "&",
- BitXor: "^",
- BitOr: "|",
-}
-
-func (ao ArithOp) SQL() string { return buildSQL(ao) }
-func (ao ArithOp) addSQL(sb *strings.Builder) {
- // Extra parens inserted to ensure the correct precedence.
-
- switch ao.Op {
- case Neg:
- sb.WriteString("-(")
- ao.RHS.addSQL(sb)
- sb.WriteString(")")
- return
- case Plus:
- sb.WriteString("+(")
- ao.RHS.addSQL(sb)
- sb.WriteString(")")
- return
- case BitNot:
- sb.WriteString("~(")
- ao.RHS.addSQL(sb)
- sb.WriteString(")")
- return
- }
- op, ok := arithOps[ao.Op]
- if !ok {
- panic("unknown ArithOp")
- }
- sb.WriteString("(")
- ao.LHS.addSQL(sb)
- sb.WriteString(")")
- sb.WriteString(op)
- sb.WriteString("(")
- ao.RHS.addSQL(sb)
- sb.WriteString(")")
-}
-
-func (lo LogicalOp) SQL() string { return buildSQL(lo) }
-func (lo LogicalOp) addSQL(sb *strings.Builder) {
- switch lo.Op {
- default:
- panic("unknown LogicalOp")
- case And:
- lo.LHS.addSQL(sb)
- sb.WriteString(" AND ")
- case Or:
- lo.LHS.addSQL(sb)
- sb.WriteString(" OR ")
- case Not:
- sb.WriteString("NOT ")
- }
- lo.RHS.addSQL(sb)
-}
-
-var compOps = map[ComparisonOperator]string{
- Lt: "<",
- Le: "<=",
- Gt: ">",
- Ge: ">=",
- Eq: "=",
- Ne: "!=",
- Like: "LIKE",
- NotLike: "NOT LIKE",
- Between: "BETWEEN",
- NotBetween: "NOT BETWEEN",
-}
-
-func (co ComparisonOp) SQL() string { return buildSQL(co) }
-func (co ComparisonOp) addSQL(sb *strings.Builder) {
- op, ok := compOps[co.Op]
- if !ok {
- panic("unknown ComparisonOp")
- }
- co.LHS.addSQL(sb)
- sb.WriteString(" ")
- sb.WriteString(op)
- sb.WriteString(" ")
- co.RHS.addSQL(sb)
- if co.Op == Between || co.Op == NotBetween {
- sb.WriteString(" AND ")
- co.RHS2.addSQL(sb)
- }
-}
-
-func (io InOp) SQL() string { return buildSQL(io) }
-func (io InOp) addSQL(sb *strings.Builder) {
- io.LHS.addSQL(sb)
- if io.Neg {
- sb.WriteString(" NOT")
- }
- sb.WriteString(" IN ")
- if io.Unnest {
- sb.WriteString("UNNEST")
- }
- sb.WriteString("(")
- addExprList(sb, io.RHS, ", ")
- sb.WriteString(")")
-}
-
-func (io IsOp) SQL() string { return buildSQL(io) }
-func (io IsOp) addSQL(sb *strings.Builder) {
- io.LHS.addSQL(sb)
- sb.WriteString(" IS ")
- if io.Neg {
- sb.WriteString("NOT ")
- }
- io.RHS.addSQL(sb)
-}
-
-func (f Func) SQL() string { return buildSQL(f) }
-func (f Func) addSQL(sb *strings.Builder) {
- sb.WriteString(f.Name)
- sb.WriteString("(")
- if f.Distinct {
- sb.WriteString("DISTINCT ")
- }
- addExprList(sb, f.Args, ", ")
- switch f.NullsHandling {
- case RespectNulls:
- sb.WriteString(" RESPECT NULLS")
- case IgnoreNulls:
- sb.WriteString(" IGNORE NULLS")
- }
- if ah := f.Having; ah != nil {
- sb.WriteString(" HAVING")
- switch ah.Condition {
- case HavingMax:
- sb.WriteString(" MAX")
- case HavingMin:
- sb.WriteString(" MIN")
- }
- sb.WriteString(" ")
- sb.WriteString(ah.Expr.SQL())
- }
- sb.WriteString(")")
-}
-
-func (te TypedExpr) SQL() string { return buildSQL(te) }
-func (te TypedExpr) addSQL(sb *strings.Builder) {
- te.Expr.addSQL(sb)
- sb.WriteString(" AS ")
- sb.WriteString(te.Type.SQL())
-}
-
-func (ee ExtractExpr) SQL() string { return buildSQL(ee) }
-func (ee ExtractExpr) addSQL(sb *strings.Builder) {
- sb.WriteString(ee.Part)
- sb.WriteString(" FROM ")
- ee.Expr.addSQL(sb)
-}
-
-func (aze AtTimeZoneExpr) SQL() string { return buildSQL(aze) }
-func (aze AtTimeZoneExpr) addSQL(sb *strings.Builder) {
- aze.Expr.addSQL(sb)
- sb.WriteString(" AT TIME ZONE ")
- sb.WriteString(aze.Zone)
-}
-
-func (ie IntervalExpr) SQL() string { return buildSQL(ie) }
-func (ie IntervalExpr) addSQL(sb *strings.Builder) {
- sb.WriteString("INTERVAL")
- sb.WriteString(" ")
- ie.Expr.addSQL(sb)
- sb.WriteString(" ")
- sb.WriteString(ie.DatePart)
-}
-
-func (se SequenceExpr) SQL() string { return buildSQL(se) }
-func (se SequenceExpr) addSQL(sb *strings.Builder) {
- sb.WriteString("SEQUENCE ")
- sb.WriteString(se.Name.SQL())
-}
-
-func idList(l []ID, join string) string {
- var ss []string
- for _, s := range l {
- ss = append(ss, s.SQL())
- }
- return strings.Join(ss, join)
-}
-
-func addExprList(sb *strings.Builder, l []Expr, join string) {
- for i, s := range l {
- if i > 0 {
- sb.WriteString(join)
- }
- s.addSQL(sb)
- }
-}
-
-func addIDList(sb *strings.Builder, l []ID, join string) {
- for i, s := range l {
- if i > 0 {
- sb.WriteString(join)
- }
- s.addSQL(sb)
- }
-}
-
-func (pe PathExp) SQL() string { return buildSQL(pe) }
-func (pe PathExp) addSQL(sb *strings.Builder) {
- addIDList(sb, []ID(pe), ".")
-}
-
-func (p Paren) SQL() string { return buildSQL(p) }
-func (p Paren) addSQL(sb *strings.Builder) {
- sb.WriteString("(")
- p.Expr.addSQL(sb)
- sb.WriteString(")")
-}
-
-func (a Array) SQL() string { return buildSQL(a) }
-func (a Array) addSQL(sb *strings.Builder) {
- sb.WriteString("[")
- addExprList(sb, []Expr(a), ", ")
- sb.WriteString("]")
-}
-
-func (id ID) SQL() string { return buildSQL(id) }
-func (id ID) addSQL(sb *strings.Builder) {
- // https://cloud.google.com/spanner/docs/lexical#identifiers
-
- // TODO: If there are non-letters/numbers/underscores then this also needs quoting.
-
- // Naming Convention: Must be enclosed in backticks (`) if it's a reserved keyword or contains a hyphen.
- if IsKeyword(string(id)) || strings.Contains(string(id), "-") {
- // TODO: Escaping may be needed here.
- sb.WriteString("`")
- sb.WriteString(string(id))
- sb.WriteString("`")
- return
- }
-
- sb.WriteString(string(id))
-}
-
-func (p Param) SQL() string { return buildSQL(p) }
-func (p Param) addSQL(sb *strings.Builder) {
- sb.WriteString("@")
- sb.WriteString(string(p))
-}
-
-func (c Case) SQL() string { return buildSQL(c) }
-func (c Case) addSQL(sb *strings.Builder) {
- sb.WriteString("CASE ")
- if c.Expr != nil {
- fmt.Fprintf(sb, "%s ", c.Expr.SQL())
- }
- for _, w := range c.WhenClauses {
- fmt.Fprintf(sb, "WHEN %s THEN %s ", w.Cond.SQL(), w.Result.SQL())
- }
- if c.ElseResult != nil {
- fmt.Fprintf(sb, "ELSE %s ", c.ElseResult.SQL())
- }
- sb.WriteString("END")
-}
-
-func (c Coalesce) SQL() string { return buildSQL(c) }
-func (c Coalesce) addSQL(sb *strings.Builder) {
- sb.WriteString("COALESCE(")
- for i, expr := range c.ExprList {
- if i > 0 {
- sb.WriteString(", ")
- }
- expr.addSQL(sb)
- }
- sb.WriteString(")")
-}
-
-func (i If) SQL() string { return buildSQL(i) }
-func (i If) addSQL(sb *strings.Builder) {
- sb.WriteString("IF(")
- i.Expr.addSQL(sb)
- sb.WriteString(", ")
- i.TrueResult.addSQL(sb)
- sb.WriteString(", ")
- i.ElseResult.addSQL(sb)
- sb.WriteString(")")
-}
-
-func (in IfNull) SQL() string { return buildSQL(in) }
-func (in IfNull) addSQL(sb *strings.Builder) {
- sb.WriteString("IFNULL(")
- in.Expr.addSQL(sb)
- sb.WriteString(", ")
- in.NullResult.addSQL(sb)
- sb.WriteString(")")
-}
-
-func (ni NullIf) SQL() string { return buildSQL(ni) }
-func (ni NullIf) addSQL(sb *strings.Builder) {
- sb.WriteString("NULLIF(")
- ni.Expr.addSQL(sb)
- sb.WriteString(", ")
- ni.ExprToMatch.addSQL(sb)
- sb.WriteString(")")
-}
-
-func (b BoolLiteral) SQL() string { return buildSQL(b) }
-func (b BoolLiteral) addSQL(sb *strings.Builder) {
- if b {
- sb.WriteString("TRUE")
- } else {
- sb.WriteString("FALSE")
- }
-}
-
-func (NullLiteral) SQL() string { return buildSQL(NullLiteral(0)) }
-func (NullLiteral) addSQL(sb *strings.Builder) { sb.WriteString("NULL") }
-
-func (StarExpr) SQL() string { return buildSQL(StarExpr(0)) }
-func (StarExpr) addSQL(sb *strings.Builder) { sb.WriteString("*") }
-
-func (il IntegerLiteral) SQL() string { return buildSQL(il) }
-func (il IntegerLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "%d", il) }
-
-func (fl FloatLiteral) SQL() string { return buildSQL(fl) }
-func (fl FloatLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "%g", fl) }
-
-// TODO: provide correct string quote method and use it.
-
-func (sl StringLiteral) SQL() string { return buildSQL(sl) }
-func (sl StringLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "%q", sl) }
-
-func (bl BytesLiteral) SQL() string { return buildSQL(bl) }
-func (bl BytesLiteral) addSQL(sb *strings.Builder) { fmt.Fprintf(sb, "B%q", bl) }
-
-func (dl DateLiteral) SQL() string { return buildSQL(dl) }
-func (dl DateLiteral) addSQL(sb *strings.Builder) {
- fmt.Fprintf(sb, "DATE '%04d-%02d-%02d'", dl.Year, dl.Month, dl.Day)
-}
-
-func (tl TimestampLiteral) SQL() string { return buildSQL(tl) }
-func (tl TimestampLiteral) addSQL(sb *strings.Builder) {
- fmt.Fprintf(sb, "TIMESTAMP '%s'", time.Time(tl).Format("2006-01-02 15:04:05.000000-07:00"))
-}
-
-func (jl JSONLiteral) SQL() string { return buildSQL(jl) }
-func (jl JSONLiteral) addSQL(sb *strings.Builder) {
- fmt.Fprintf(sb, "JSON '%s'", jl)
-}
diff --git a/vendor/cloud.google.com/go/spanner/spansql/types.go b/vendor/cloud.google.com/go/spanner/spansql/types.go
deleted file mode 100644
index 481d83f10..000000000
--- a/vendor/cloud.google.com/go/spanner/spansql/types.go
+++ /dev/null
@@ -1,1394 +0,0 @@
-/*
-Copyright 2019 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spansql
-
-// This file holds the type definitions for the SQL dialect.
-
-import (
- "fmt"
- "math"
- "sort"
- "strings"
- "time"
-
- "cloud.google.com/go/civil"
-)
-
-// TODO: More Position fields throughout; maybe in Query/Select.
-
-// CreateTable represents a CREATE TABLE statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#create_table
-type CreateTable struct {
- Name ID
- IfNotExists bool
- Columns []ColumnDef
- Constraints []TableConstraint
- PrimaryKey []KeyPart
- Interleave *Interleave
- RowDeletionPolicy *RowDeletionPolicy
- Synonym ID // may be empty
-
- Position Position // position of the "CREATE" token
-}
-
-func (ct *CreateTable) String() string { return fmt.Sprintf("%#v", ct) }
-func (*CreateTable) isDDLStmt() {}
-func (ct *CreateTable) Pos() Position { return ct.Position }
-func (ct *CreateTable) clearOffset() {
- for i := range ct.Columns {
- // Mutate in place.
- ct.Columns[i].clearOffset()
- }
- for i := range ct.Constraints {
- // Mutate in place.
- ct.Constraints[i].clearOffset()
- }
- ct.Position.Offset = 0
-}
-
-// TableConstraint represents a constraint on a table.
-type TableConstraint struct {
- Name ID // may be empty
- Constraint Constraint
-
- Position Position // position of the "CONSTRAINT" token, or Constraint.Pos()
-}
-
-func (tc TableConstraint) Pos() Position { return tc.Position }
-func (tc *TableConstraint) clearOffset() {
- switch c := tc.Constraint.(type) {
- case ForeignKey:
- c.clearOffset()
- tc.Constraint = c
- case Check:
- c.clearOffset()
- tc.Constraint = c
- }
- tc.Position.Offset = 0
-}
-
-type Constraint interface {
- isConstraint()
- SQL() string
- Node
-}
-
-// Interleave represents an interleave clause of a CREATE TABLE statement.
-type Interleave struct {
- Parent ID
- OnDelete OnDelete
-}
-
-// RowDeletionPolicy represents an row deletion policy clause of a CREATE, ALTER TABLE statement.
-type RowDeletionPolicy struct {
- Column ID
- NumDays int64
-}
-
-// CreateIndex represents a CREATE INDEX statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#create-index
-type CreateIndex struct {
- Name ID
- Table ID
- Columns []KeyPart
-
- Unique bool
- NullFiltered bool
- IfNotExists bool
-
- Storing []ID
- Interleave ID
-
- Position Position // position of the "CREATE" token
-}
-
-func (ci *CreateIndex) String() string { return fmt.Sprintf("%#v", ci) }
-func (*CreateIndex) isDDLStmt() {}
-func (ci *CreateIndex) Pos() Position { return ci.Position }
-func (ci *CreateIndex) clearOffset() { ci.Position.Offset = 0 }
-
-// CreateView represents a CREATE [OR REPLACE] VIEW statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#view_statements
-type CreateView struct {
- Name ID
- OrReplace bool
- SecurityType SecurityType
- Query Query
-
- Position Position // position of the "CREATE" token
-}
-
-func (cv *CreateView) String() string { return fmt.Sprintf("%#v", cv) }
-func (*CreateView) isDDLStmt() {}
-func (cv *CreateView) Pos() Position { return cv.Position }
-func (cv *CreateView) clearOffset() { cv.Position.Offset = 0 }
-
-type SecurityType int
-
-const (
- Invoker SecurityType = iota
- Definer
-)
-
-// CreateRole represents a CREATE Role statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#create_role
-type CreateRole struct {
- Name ID
-
- Position Position // position of the "CREATE" token
-}
-
-func (cr *CreateRole) String() string { return fmt.Sprintf("%#v", cr) }
-func (*CreateRole) isDDLStmt() {}
-func (cr *CreateRole) Pos() Position { return cr.Position }
-func (cr *CreateRole) clearOffset() { cr.Position.Offset = 0 }
-
-// DropTable represents a DROP TABLE statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#drop_table
-type DropTable struct {
- Name ID
- IfExists bool
-
- Position Position // position of the "DROP" token
-}
-
-func (dt *DropTable) String() string { return fmt.Sprintf("%#v", dt) }
-func (*DropTable) isDDLStmt() {}
-func (dt *DropTable) Pos() Position { return dt.Position }
-func (dt *DropTable) clearOffset() { dt.Position.Offset = 0 }
-
-// DropIndex represents a DROP INDEX statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#drop-index
-type DropIndex struct {
- Name ID
- IfExists bool
-
- Position Position // position of the "DROP" token
-}
-
-func (di *DropIndex) String() string { return fmt.Sprintf("%#v", di) }
-func (*DropIndex) isDDLStmt() {}
-func (di *DropIndex) Pos() Position { return di.Position }
-func (di *DropIndex) clearOffset() { di.Position.Offset = 0 }
-
-// DropView represents a DROP VIEW statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#drop-view
-type DropView struct {
- Name ID
-
- Position Position // position of the "DROP" token
-}
-
-func (dv *DropView) String() string { return fmt.Sprintf("%#v", dv) }
-func (*DropView) isDDLStmt() {}
-func (dv *DropView) Pos() Position { return dv.Position }
-func (dv *DropView) clearOffset() { dv.Position.Offset = 0 }
-
-// DropRole represents a DROP ROLE statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#drop_role
-type DropRole struct {
- Name ID
-
- Position Position // position of the "DROP" token
-}
-
-func (dr *DropRole) String() string { return fmt.Sprintf("%#v", dr) }
-func (*DropRole) isDDLStmt() {}
-func (dr *DropRole) Pos() Position { return dr.Position }
-func (dr *DropRole) clearOffset() { dr.Position.Offset = 0 }
-
-// GrantRole represents a GRANT statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#grant_statement
-type GrantRole struct {
- ToRoleNames []ID
- GrantRoleNames []ID
- Privileges []Privilege
- TableNames []ID
- TvfNames []ID
- ViewNames []ID
- ChangeStreamNames []ID
-
- Position Position // position of the "GRANT" token
-}
-
-func (gr *GrantRole) String() string { return fmt.Sprintf("%#v", gr) }
-func (*GrantRole) isDDLStmt() {}
-func (gr *GrantRole) Pos() Position { return gr.Position }
-func (gr *GrantRole) clearOffset() { gr.Position.Offset = 0 }
-
-// RevokeRole represents a REVOKE statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#revoke_statement
-type RevokeRole struct {
- FromRoleNames []ID
- RevokeRoleNames []ID
- Privileges []Privilege
- TableNames []ID
- TvfNames []ID
- ViewNames []ID
- ChangeStreamNames []ID
- Position Position // position of the "REVOKE" token
-}
-
-func (rr *RevokeRole) String() string { return fmt.Sprintf("%#v", rr) }
-func (*RevokeRole) isDDLStmt() {}
-func (rr *RevokeRole) Pos() Position { return rr.Position }
-func (rr *RevokeRole) clearOffset() { rr.Position.Offset = 0 }
-
-// Privilege represents privilege to grant or revoke.
-type Privilege struct {
- Type PrivilegeType
- Columns []ID
-}
-
-// AlterTable represents an ALTER TABLE statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#alter_table
-type AlterTable struct {
- Name ID
- Alteration TableAlteration
-
- Position Position // position of the "ALTER" token
-}
-
-func (at *AlterTable) String() string { return fmt.Sprintf("%#v", at) }
-func (*AlterTable) isDDLStmt() {}
-func (at *AlterTable) Pos() Position { return at.Position }
-func (at *AlterTable) clearOffset() {
- switch alt := at.Alteration.(type) {
- case AddColumn:
- alt.Def.clearOffset()
- at.Alteration = alt
- case AddConstraint:
- alt.Constraint.clearOffset()
- at.Alteration = alt
- }
- at.Position.Offset = 0
-}
-
-// TableAlteration is satisfied by AddColumn, DropColumn, AddConstraint,
-// DropConstraint, SetOnDelete, AlterColumn,
-// AddRowDeletionPolicy, ReplaceRowDeletionPolicy, DropRowDeletionPolicy,
-// RenameTo, AddSynonym, and DropSynonym.
-type TableAlteration interface {
- isTableAlteration()
- SQL() string
-}
-
-func (AddColumn) isTableAlteration() {}
-func (DropColumn) isTableAlteration() {}
-func (AddConstraint) isTableAlteration() {}
-func (DropConstraint) isTableAlteration() {}
-func (SetOnDelete) isTableAlteration() {}
-func (AlterColumn) isTableAlteration() {}
-func (AddRowDeletionPolicy) isTableAlteration() {}
-func (ReplaceRowDeletionPolicy) isTableAlteration() {}
-func (DropRowDeletionPolicy) isTableAlteration() {}
-func (RenameTo) isTableAlteration() {}
-func (AddSynonym) isTableAlteration() {}
-func (DropSynonym) isTableAlteration() {}
-
-type (
- AddColumn struct {
- IfNotExists bool
- Def ColumnDef
- }
- DropColumn struct{ Name ID }
- AddConstraint struct{ Constraint TableConstraint }
- DropConstraint struct{ Name ID }
- SetOnDelete struct{ Action OnDelete }
- AlterColumn struct {
- Name ID
- Alteration ColumnAlteration
- }
-)
-
-type (
- AddRowDeletionPolicy struct{ RowDeletionPolicy RowDeletionPolicy }
- ReplaceRowDeletionPolicy struct{ RowDeletionPolicy RowDeletionPolicy }
- DropRowDeletionPolicy struct{}
-)
-
-// ColumnAlteration is satisfied by SetColumnType and SetColumnOptions.
-type ColumnAlteration interface {
- isColumnAlteration()
- SQL() string
-}
-
-func (SetColumnType) isColumnAlteration() {}
-func (SetColumnOptions) isColumnAlteration() {}
-func (SetDefault) isColumnAlteration() {}
-func (DropDefault) isColumnAlteration() {}
-
-type SetColumnType struct {
- Type Type
- NotNull bool
- Default Expr
-}
-
-type SetColumnOptions struct{ Options ColumnOptions }
-
-type SetDefault struct {
- Default Expr
-}
-
-type DropDefault struct{}
-
-type OnDelete int
-
-const (
- NoActionOnDelete OnDelete = iota
- CascadeOnDelete
-)
-
-type (
- RenameTo struct {
- ToName ID
- Synonym ID // may be empty
- }
- AddSynonym struct{ Name ID }
- DropSynonym struct{ Name ID }
-)
-
-// RenameTable represents a RENAME TABLE statement.
-type RenameTable struct {
- TableRenameOps []TableRenameOp
-
- Position Position // position of the "RENAME" token
-}
-
-type TableRenameOp struct {
- FromName ID
- ToName ID
-}
-
-func (rt *RenameTable) String() string { return fmt.Sprintf("%#v", rt) }
-func (*RenameTable) isDDLStmt() {}
-func (rt *RenameTable) Pos() Position { return rt.Position }
-func (rt *RenameTable) clearOffset() { rt.Position.Offset = 0 }
-
-// AlterDatabase represents an ALTER DATABASE statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#alter-database
-type AlterDatabase struct {
- Name ID
- Alteration DatabaseAlteration
-
- Position Position // position of the "ALTER" token
-}
-
-func (ad *AlterDatabase) String() string { return fmt.Sprintf("%#v", ad) }
-func (*AlterDatabase) isDDLStmt() {}
-func (ad *AlterDatabase) Pos() Position { return ad.Position }
-func (ad *AlterDatabase) clearOffset() { ad.Position.Offset = 0 }
-
-type DatabaseAlteration interface {
- isDatabaseAlteration()
- SQL() string
-}
-
-type SetDatabaseOptions struct{ Options DatabaseOptions }
-
-func (SetDatabaseOptions) isDatabaseAlteration() {}
-
-// DatabaseOptions represents options on a database as part of a
-// ALTER DATABASE statement.
-type DatabaseOptions struct {
- OptimizerVersion *int
- OptimizerStatisticsPackage *string
- VersionRetentionPeriod *string
- EnableKeyVisualizer *bool
- DefaultLeader *string
-}
-
-// Delete represents a DELETE statement.
-// https://cloud.google.com/spanner/docs/dml-syntax#delete-statement
-type Delete struct {
- Table ID
- Where BoolExpr
-
- // TODO: Alias
-}
-
-func (d *Delete) String() string { return fmt.Sprintf("%#v", d) }
-func (*Delete) isDMLStmt() {}
-
-// Insert represents an INSERT statement.
-// https://cloud.google.com/spanner/docs/dml-syntax#insert-statement
-type Insert struct {
- Table ID
- Columns []ID
- Input ValuesOrSelect
-}
-
-// Values represents one or more lists of expressions passed to an `INSERT` statement.
-type Values [][]Expr
-
-func (v Values) isValuesOrSelect() {}
-func (v Values) String() string { return fmt.Sprintf("%#v", v) }
-
-type ValuesOrSelect interface {
- isValuesOrSelect()
- SQL() string
-}
-
-func (Select) isValuesOrSelect() {}
-
-func (i *Insert) String() string { return fmt.Sprintf("%#v", i) }
-func (*Insert) isDMLStmt() {}
-
-// Update represents an UPDATE statement.
-// https://cloud.google.com/spanner/docs/dml-syntax#update-statement
-type Update struct {
- Table ID
- Items []UpdateItem
- Where BoolExpr
-
- // TODO: Alias
-}
-
-func (u *Update) String() string { return fmt.Sprintf("%#v", u) }
-func (*Update) isDMLStmt() {}
-
-type UpdateItem struct {
- Column ID
- Value Expr // or nil for DEFAULT
-}
-
-// ColumnDef represents a column definition as part of a CREATE TABLE
-// or ALTER TABLE statement.
-type ColumnDef struct {
- Name ID
- Type Type
- NotNull bool
-
- Default Expr // set if this column has a default value
- Generated Expr // set of this is a generated column
-
- Options ColumnOptions
-
- Position Position // position of the column name
-}
-
-func (cd ColumnDef) Pos() Position { return cd.Position }
-func (cd *ColumnDef) clearOffset() { cd.Position.Offset = 0 }
-
-// ColumnOptions represents options on a column as part of a
-// CREATE TABLE or ALTER TABLE statement.
-type ColumnOptions struct {
- // AllowCommitTimestamp represents a column OPTIONS.
- // `true` if query is `OPTIONS (allow_commit_timestamp = true)`
- // `false` if query is `OPTIONS (allow_commit_timestamp = null)`
- // `nil` if there are no OPTIONS
- AllowCommitTimestamp *bool
-}
-
-// ForeignKey represents a foreign key definition as part of a CREATE TABLE
-// or ALTER TABLE statement.
-type ForeignKey struct {
- Columns []ID
- RefTable ID
- RefColumns []ID
- OnDelete OnDelete
-
- Position Position // position of the "FOREIGN" token
-}
-
-func (fk ForeignKey) Pos() Position { return fk.Position }
-func (fk *ForeignKey) clearOffset() { fk.Position.Offset = 0 }
-func (ForeignKey) isConstraint() {}
-
-// Check represents a check constraint as part of a CREATE TABLE
-// or ALTER TABLE statement.
-type Check struct {
- Expr BoolExpr
-
- Position Position // position of the "CHECK" token
-}
-
-func (c Check) Pos() Position { return c.Position }
-func (c *Check) clearOffset() { c.Position.Offset = 0 }
-func (Check) isConstraint() {}
-
-// Type represents a column type.
-type Type struct {
- Array bool
- Base TypeBase // Bool, Int64, Float64, Numeric, String, Bytes, Date, Timestamp
- Len int64 // if Base is String or Bytes; may be MaxLen
-}
-
-// MaxLen is a sentinel for Type's Len field, representing the MAX value.
-const MaxLen = math.MaxInt64
-
-type TypeBase int
-
-const (
- Bool TypeBase = iota
- Int64
- Float64
- Numeric
- String
- Bytes
- Date
- Timestamp
- JSON
-)
-
-type PrivilegeType int
-
-const (
- PrivilegeTypeSelect PrivilegeType = iota
- PrivilegeTypeInsert
- PrivilegeTypeUpdate
- PrivilegeTypeDelete
-)
-
-// KeyPart represents a column specification as part of a primary key or index definition.
-type KeyPart struct {
- Column ID
- Desc bool
-}
-
-// Query represents a query statement.
-// https://cloud.google.com/spanner/docs/query-syntax#sql-syntax
-type Query struct {
- Select Select
- Order []Order
-
- Limit, Offset LiteralOrParam
-}
-
-// Select represents a SELECT statement.
-// https://cloud.google.com/spanner/docs/query-syntax#select-list
-type Select struct {
- Distinct bool
- List []Expr
- From []SelectFrom
- Where BoolExpr
- GroupBy []Expr
- // TODO: Having
-
- // When the FROM clause has TABLESAMPLE operators,
- // TableSamples will be populated 1:1 with From;
- // FROM clauses without will have a nil value.
- TableSamples []*TableSample
-
- // If the SELECT list has explicit aliases ("AS alias"),
- // ListAliases will be populated 1:1 with List;
- // aliases that are present will be non-empty.
- ListAliases []ID
-}
-
-// SelectFrom represents the FROM clause of a SELECT.
-// https://cloud.google.com/spanner/docs/query-syntax#from_clause
-type SelectFrom interface {
- isSelectFrom()
- SQL() string
-}
-
-// SelectFromTable is a SelectFrom that specifies a table to read from.
-type SelectFromTable struct {
- Table ID
- Alias ID // empty if not aliased
- Hints map[string]string
-}
-
-func (SelectFromTable) isSelectFrom() {}
-
-// SelectFromJoin is a SelectFrom that joins two other SelectFroms.
-// https://cloud.google.com/spanner/docs/query-syntax#join_types
-type SelectFromJoin struct {
- Type JoinType
- LHS, RHS SelectFrom
-
- // Join condition.
- // At most one of {On,Using} may be set.
- On BoolExpr
- Using []ID
-
- // Hints are suggestions for how to evaluate a join.
- // https://cloud.google.com/spanner/docs/query-syntax#join-hints
- Hints map[string]string
-}
-
-func (SelectFromJoin) isSelectFrom() {}
-
-type JoinType int
-
-const (
- InnerJoin JoinType = iota
- CrossJoin
- FullJoin
- LeftJoin
- RightJoin
-)
-
-// SelectFromUnnest is a SelectFrom that yields a virtual table from an array.
-// https://cloud.google.com/spanner/docs/query-syntax#unnest
-type SelectFromUnnest struct {
- Expr Expr
- Alias ID // empty if not aliased
-
- // TODO: Implicit
-}
-
-func (SelectFromUnnest) isSelectFrom() {}
-
-// TODO: SelectFromSubquery, etc.
-
-type Order struct {
- Expr Expr
- Desc bool
-}
-
-type TableSample struct {
- Method TableSampleMethod
- Size Expr
- SizeType TableSampleSizeType
-}
-
-type TableSampleMethod int
-
-const (
- Bernoulli TableSampleMethod = iota
- Reservoir
-)
-
-type TableSampleSizeType int
-
-const (
- PercentTableSample TableSampleSizeType = iota
- RowsTableSample
-)
-
-type BoolExpr interface {
- isBoolExpr()
- Expr
-}
-
-type Expr interface {
- isExpr()
- SQL() string
- addSQL(*strings.Builder)
-}
-
-// LiteralOrParam is implemented by integer literal and parameter values.
-type LiteralOrParam interface {
- isLiteralOrParam()
- SQL() string
-}
-
-type ArithOp struct {
- Op ArithOperator
- LHS, RHS Expr // only RHS is set for Neg, Plus, BitNot
-}
-
-func (ArithOp) isExpr() {}
-
-type ArithOperator int
-
-const (
- Neg ArithOperator = iota // unary -
- Plus // unary +
- BitNot // unary ~
- Mul // *
- Div // /
- Concat // ||
- Add // +
- Sub // -
- BitShl // <<
- BitShr // >>
- BitAnd // &
- BitXor // ^
- BitOr // |
-)
-
-type LogicalOp struct {
- Op LogicalOperator
- LHS, RHS BoolExpr // only RHS is set for Not
-}
-
-func (LogicalOp) isBoolExpr() {}
-func (LogicalOp) isExpr() {}
-
-type LogicalOperator int
-
-const (
- And LogicalOperator = iota
- Or
- Not
-)
-
-type ComparisonOp struct {
- Op ComparisonOperator
- LHS, RHS Expr
-
- // RHS2 is the third operand for BETWEEN.
- // "<LHS> BETWEEN <RHS> AND <RHS2>".
- RHS2 Expr
-}
-
-func (ComparisonOp) isBoolExpr() {}
-func (ComparisonOp) isExpr() {}
-
-type ComparisonOperator int
-
-const (
- Lt ComparisonOperator = iota
- Le
- Gt
- Ge
- Eq
- Ne // both "!=" and "<>"
- Like
- NotLike
- Between
- NotBetween
-)
-
-type InOp struct {
- LHS Expr
- Neg bool
- RHS []Expr
- Unnest bool
-
- // TODO: support subquery form
-}
-
-func (InOp) isBoolExpr() {} // usually
-func (InOp) isExpr() {}
-
-type IsOp struct {
- LHS Expr
- Neg bool
- RHS IsExpr
-}
-
-func (IsOp) isBoolExpr() {}
-func (IsOp) isExpr() {}
-
-type IsExpr interface {
- isIsExpr()
- Expr
-}
-
-// PathExp represents a path expression.
-//
-// The grammar for path expressions is not defined (see b/169017423 internally),
-// so this captures the most common form only, namely a dotted sequence of identifiers.
-type PathExp []ID
-
-func (PathExp) isExpr() {}
-
-// Func represents a function call.
-type Func struct {
- Name string // not ID
- Args []Expr
-
- Distinct bool
- NullsHandling NullsHandling
- Having *AggregateHaving
-}
-
-func (Func) isBoolExpr() {} // possibly bool
-func (Func) isExpr() {}
-
-// TypedExpr represents a typed expression in the form `expr AS type_name`, e.g. `'17' AS INT64`.
-type TypedExpr struct {
- Type Type
- Expr Expr
-}
-
-func (TypedExpr) isBoolExpr() {} // possibly bool
-func (TypedExpr) isExpr() {}
-
-type ExtractExpr struct {
- Part string
- Type Type
- Expr Expr
-}
-
-func (ExtractExpr) isBoolExpr() {} // possibly bool
-func (ExtractExpr) isExpr() {}
-
-type AtTimeZoneExpr struct {
- Expr Expr
- Type Type
- Zone string
-}
-
-func (AtTimeZoneExpr) isBoolExpr() {} // possibly bool
-func (AtTimeZoneExpr) isExpr() {}
-
-type IntervalExpr struct {
- Expr Expr
- DatePart string
-}
-
-func (IntervalExpr) isBoolExpr() {} // possibly bool
-func (IntervalExpr) isExpr() {}
-
-type SequenceExpr struct {
- Name ID
-}
-
-func (SequenceExpr) isExpr() {}
-
-// NullsHandling represents the method of dealing with NULL values in aggregate functions.
-type NullsHandling int
-
-const (
- NullsHandlingUnspecified NullsHandling = iota
- RespectNulls
- IgnoreNulls
-)
-
-// AggregateHaving represents the HAVING clause specific to aggregate functions, restricting rows based on a maximal or minimal value.
-type AggregateHaving struct {
- Condition AggregateHavingCondition
- Expr Expr
-}
-
-// AggregateHavingCondition represents the condition (MAX or MIN) for the AggregateHaving clause.
-type AggregateHavingCondition int
-
-const (
- HavingMax AggregateHavingCondition = iota
- HavingMin
-)
-
-// Paren represents a parenthesised expression.
-type Paren struct {
- Expr Expr
-}
-
-func (Paren) isBoolExpr() {} // possibly bool
-func (Paren) isExpr() {}
-
-// Array represents an array literal.
-type Array []Expr
-
-func (Array) isExpr() {}
-
-// ID represents an identifier.
-// https://cloud.google.com/spanner/docs/lexical#identifiers
-type ID string
-
-func (ID) isBoolExpr() {} // possibly bool
-func (ID) isExpr() {}
-
-// Param represents a query parameter.
-type Param string
-
-func (Param) isBoolExpr() {} // possibly bool
-func (Param) isExpr() {}
-func (Param) isLiteralOrParam() {}
-
-type Case struct {
- Expr Expr
- WhenClauses []WhenClause
- ElseResult Expr
-}
-
-func (Case) isBoolExpr() {} // possibly bool
-func (Case) isExpr() {}
-
-type WhenClause struct {
- Cond Expr
- Result Expr
-}
-
-type Coalesce struct {
- ExprList []Expr
-}
-
-func (Coalesce) isBoolExpr() {} // possibly bool
-func (Coalesce) isExpr() {}
-
-type If struct {
- Expr Expr
- TrueResult Expr
- ElseResult Expr
-}
-
-func (If) isBoolExpr() {} // possibly bool
-func (If) isExpr() {}
-
-type IfNull struct {
- Expr Expr
- NullResult Expr
-}
-
-func (IfNull) isBoolExpr() {} // possibly bool
-func (IfNull) isExpr() {}
-
-type NullIf struct {
- Expr Expr
- ExprToMatch Expr
-}
-
-func (NullIf) isBoolExpr() {} // possibly bool
-func (NullIf) isExpr() {}
-
-type BoolLiteral bool
-
-const (
- True = BoolLiteral(true)
- False = BoolLiteral(false)
-)
-
-func (BoolLiteral) isBoolExpr() {}
-func (BoolLiteral) isIsExpr() {}
-func (BoolLiteral) isExpr() {}
-
-type NullLiteral int
-
-const Null = NullLiteral(0)
-
-func (NullLiteral) isIsExpr() {}
-func (NullLiteral) isExpr() {}
-
-// IntegerLiteral represents an integer literal.
-// https://cloud.google.com/spanner/docs/lexical#integer-literals
-type IntegerLiteral int64
-
-func (IntegerLiteral) isLiteralOrParam() {}
-func (IntegerLiteral) isExpr() {}
-
-// FloatLiteral represents a floating point literal.
-// https://cloud.google.com/spanner/docs/lexical#floating-point-literals
-type FloatLiteral float64
-
-func (FloatLiteral) isExpr() {}
-
-// StringLiteral represents a string literal.
-// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-type StringLiteral string
-
-func (StringLiteral) isExpr() {}
-
-// BytesLiteral represents a bytes literal.
-// https://cloud.google.com/spanner/docs/lexical#string-and-bytes-literals
-type BytesLiteral string
-
-func (BytesLiteral) isExpr() {}
-
-// DateLiteral represents a date literal.
-// https://cloud.google.com/spanner/docs/lexical#date_literals
-type DateLiteral civil.Date
-
-func (DateLiteral) isExpr() {}
-
-// TimestampLiteral represents a timestamp literal.
-// https://cloud.google.com/spanner/docs/lexical#timestamp_literals
-type TimestampLiteral time.Time
-
-func (TimestampLiteral) isExpr() {}
-
-// JSONLiteral represents a JSON literal
-// https://cloud.google.com/spanner/docs/reference/standard-sql/lexical#json_literals
-type JSONLiteral []byte
-
-func (JSONLiteral) isExpr() {}
-
-type StarExpr int
-
-// Star represents a "*" in an expression.
-const Star = StarExpr(0)
-
-func (StarExpr) isExpr() {}
-
-type statements interface {
- setFilename(string)
- getComments() []*Comment
- addComment(*Comment)
-}
-
-// DDL
-// https://cloud.google.com/spanner/docs/data-definition-language#ddl_syntax
-
-// DDL represents a Data Definition Language (DDL) file.
-type DDL struct {
- List []DDLStmt
-
- Filename string // if known at parse time
-
- Comments []*Comment // all comments, sorted by position
-}
-
-func (d *DDL) clearOffset() {
- for _, stmt := range d.List {
- stmt.clearOffset()
- }
- for _, c := range d.Comments {
- c.clearOffset()
- }
-}
-
-func (d *DDL) setFilename(filename string) {
- d.Filename = filename
-}
-
-func (d *DDL) addComment(comment *Comment) {
- d.Comments = append(d.Comments, comment)
-}
-
-func (d *DDL) getComments() []*Comment {
- return d.Comments
-}
-
-// DML
-// https://cloud.google.com/spanner/docs/reference/standard-sql/dml-syntax
-
-// DML represents a Data Manipulation Language (DML) file.
-type DML struct {
- List []DMLStmt
-
- Filename string // if known at parse time
-
- Comments []*Comment // all comments, sorted by position
-}
-
-func (d *DML) clearOffset() {
- for _, c := range d.Comments {
- c.clearOffset()
- }
-}
-
-func (d *DML) setFilename(filename string) {
- d.Filename = filename
-}
-
-func (d *DML) addComment(comment *Comment) {
- d.Comments = append(d.Comments, comment)
-}
-
-func (d *DML) getComments() []*Comment {
- return d.Comments
-}
-
-// DDLStmt is satisfied by a type that can appear in a DDL.
-type DDLStmt interface {
- isDDLStmt()
- clearOffset()
- SQL() string
- Node
-}
-
-// DMLStmt is satisfied by a type that is a DML statement.
-type DMLStmt interface {
- isDMLStmt()
- SQL() string
-}
-
-// Comment represents a comment.
-type Comment struct {
- Marker string // Opening marker; one of "#", "--", "/*".
- Isolated bool // Whether this comment is on its own line.
- // Start and End are the position of the opening and terminating marker.
- Start, End Position
- Text []string
-}
-
-func (c *Comment) String() string { return fmt.Sprintf("%#v", c) }
-func (c *Comment) Pos() Position { return c.Start }
-func (c *Comment) clearOffset() { c.Start.Offset, c.End.Offset = 0, 0 }
-
-// Node is implemented by concrete types in this package that represent things
-// appearing in a DDL file.
-type Node interface {
- Pos() Position
- // clearOffset() is not included here because some types like ColumnDef
- // have the method on their pointer type rather than their natural value type.
- // This method is only invoked from within this package, so it isn't
- // important to enforce such things.
-}
-
-// Position describes a source position in an input DDL file.
-// It is only valid if the line number is positive.
-type Position struct {
- Line int // 1-based line number
- Offset int // 0-based byte offset
-}
-
-func (pos Position) IsValid() bool { return pos.Line > 0 }
-func (pos Position) String() string {
- if pos.Line == 0 {
- return ":<invalid>"
- }
- return fmt.Sprintf(":%d", pos.Line)
-}
-
-// LeadingComment returns the comment that immediately precedes a node,
-// or nil if there's no such comment.
-func (d *DDL) LeadingComment(n Node) *Comment {
- return getLeadingComment(d, n)
-}
-
-// InlineComment returns the comment on the same line as a node,
-// or nil if there's no inline comment.
-// The returned comment is guaranteed to be a single line.
-func (d *DDL) InlineComment(n Node) *Comment {
- return getInlineComment(d, n)
-}
-
-// LeadingComment returns the comment that immediately precedes a node,
-// or nil if there's no such comment.
-func (d *DML) LeadingComment(n Node) *Comment {
- return getLeadingComment(d, n)
-}
-
-// InlineComment returns the comment on the same line as a node,
-// or nil if there's no inline comment.
-// The returned comment is guaranteed to be a single line.
-func (d *DML) InlineComment(n Node) *Comment {
- return getInlineComment(d, n)
-}
-
-func getLeadingComment(stmts statements, n Node) *Comment {
- // Get the comment whose End position is on the previous line.
- lineEnd := n.Pos().Line - 1
- comments := stmts.getComments()
- ci := sort.Search(len(comments), func(i int) bool {
- return comments[i].End.Line >= lineEnd
- })
- if ci >= len(comments) || comments[ci].End.Line != lineEnd {
- return nil
- }
- if !comments[ci].Isolated {
- // This is an inline comment for a previous node.
- return nil
- }
- return comments[ci]
-}
-
-func getInlineComment(stmts statements, n Node) *Comment {
- // TODO: Do we care about comments like this?
- // string name = 1; /* foo
- // bar */
-
- pos := n.Pos()
- comments := stmts.getComments()
- ci := sort.Search(len(comments), func(i int) bool {
- return comments[i].Start.Line >= pos.Line
- })
- if ci >= len(comments) {
- return nil
- }
- c := comments[ci]
- if c.Start.Line != pos.Line {
- return nil
- }
- if c.Start.Line != c.End.Line || len(c.Text) != 1 {
- // Multi-line comment; don't return it.
- return nil
- }
- return c
-}
-
-// CreateChangeStream represents a CREATE CHANGE STREAM statement.
-// https://cloud.google.com/spanner/docs/change-streams/manage
-type CreateChangeStream struct {
- Name ID
- Watch []WatchDef
- WatchAllTables bool
- Options ChangeStreamOptions
-
- Position Position
-}
-
-func (cs *CreateChangeStream) String() string { return fmt.Sprintf("%#v", cs) }
-func (*CreateChangeStream) isDDLStmt() {}
-func (cs *CreateChangeStream) Pos() Position { return cs.Position }
-func (cs *CreateChangeStream) clearOffset() {
- for i := range cs.Watch {
- // Mutate in place.
- cs.Watch[i].clearOffset()
- }
- cs.Position.Offset = 0
-}
-
-// AlterChangeStream represents a ALTER CHANGE STREAM statement.
-type AlterChangeStream struct {
- Name ID
- Alteration ChangeStreamAlteration
-
- Position Position
-}
-
-func (acs *AlterChangeStream) String() string { return fmt.Sprintf("%#v", acs) }
-func (*AlterChangeStream) isDDLStmt() {}
-func (acs *AlterChangeStream) Pos() Position { return acs.Position }
-func (acs *AlterChangeStream) clearOffset() {
- acs.Position.Offset = 0
-}
-
-type ChangeStreamAlteration interface {
- isChangeStreamAlteration()
- SQL() string
-}
-
-func (AlterWatch) isChangeStreamAlteration() {}
-func (DropChangeStreamWatch) isChangeStreamAlteration() {}
-func (AlterChangeStreamOptions) isChangeStreamAlteration() {}
-
-type (
- AlterWatch struct {
- WatchAllTables bool
- Watch []WatchDef
- }
- DropChangeStreamWatch struct{}
- AlterChangeStreamOptions struct{ Options ChangeStreamOptions }
-)
-
-// DropChangeStream represents a DROP CHANGE STREAM statement.
-type DropChangeStream struct {
- Name ID
-
- Position Position
-}
-
-func (dc *DropChangeStream) String() string { return fmt.Sprintf("%#v", dc) }
-func (*DropChangeStream) isDDLStmt() {}
-func (dc *DropChangeStream) Pos() Position { return dc.Position }
-func (dc *DropChangeStream) clearOffset() { dc.Position.Offset = 0 }
-
-type WatchDef struct {
- Table ID
- Columns []ID
- WatchAllCols bool
-
- Position Position
-}
-
-func (wd WatchDef) Pos() Position { return wd.Position }
-func (wd *WatchDef) clearOffset() { wd.Position.Offset = 0 }
-
-type ChangeStreamOptions struct {
- RetentionPeriod *string
- ValueCaptureType *string
-}
-
-// AlterStatistics represents an ALTER STATISTICS statement.
-// https://cloud.google.com/spanner/docs/data-definition-language#alter-statistics
-type AlterStatistics struct {
- Name ID
- Alteration StatisticsAlteration
-
- Position Position // position of the "ALTER" token
-}
-
-func (as *AlterStatistics) String() string { return fmt.Sprintf("%#v", as) }
-func (*AlterStatistics) isDDLStmt() {}
-func (as *AlterStatistics) Pos() Position { return as.Position }
-func (as *AlterStatistics) clearOffset() { as.Position.Offset = 0 }
-
-type StatisticsAlteration interface {
- isStatisticsAlteration()
- SQL() string
-}
-
-type SetStatisticsOptions struct{ Options StatisticsOptions }
-
-func (SetStatisticsOptions) isStatisticsAlteration() {}
-
-// StatisticsOptions represents options on a statistics as part of a ALTER STATISTICS statement.
-type StatisticsOptions struct {
- AllowGC *bool
-}
-
-type AlterIndex struct {
- Name ID
- Alteration IndexAlteration
-
- Position Position // position of the "ALTER" token
-}
-
-func (as *AlterIndex) String() string { return fmt.Sprintf("%#v", as) }
-func (*AlterIndex) isDDLStmt() {}
-func (as *AlterIndex) Pos() Position { return as.Position }
-func (as *AlterIndex) clearOffset() { as.Position.Offset = 0 }
-
-type IndexAlteration interface {
- isIndexAlteration()
- SQL() string
-}
-
-func (AddStoredColumn) isIndexAlteration() {}
-func (DropStoredColumn) isIndexAlteration() {}
-
-type (
- AddStoredColumn struct{ Name ID }
- DropStoredColumn struct{ Name ID }
-)
-
-// CreateSequence represents an ALTER SEQUENCE statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#create-sequence
-type CreateSequence struct {
- Name ID
- IfNotExists bool
- Options SequenceOptions
-
- Position Position
-}
-
-func (cs *CreateSequence) String() string { return fmt.Sprintf("%#v", cs) }
-func (*CreateSequence) isDDLStmt() {}
-func (cs *CreateSequence) Pos() Position { return cs.Position }
-func (cs *CreateSequence) clearOffset() { cs.Position.Offset = 0 }
-
-// AlterSequence represents an ALTER SEQUENCE statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#alter-sequence
-type AlterSequence struct {
- Name ID
- Alteration SequenceAlteration
-
- Position Position
-}
-
-func (as *AlterSequence) String() string { return fmt.Sprintf("%#v", as) }
-func (*AlterSequence) isDDLStmt() {}
-func (as *AlterSequence) Pos() Position { return as.Position }
-func (as *AlterSequence) clearOffset() { as.Position.Offset = 0 }
-
-type SequenceAlteration interface {
- isSequenceAlteration()
- SQL() string
-}
-
-type SetSequenceOptions struct{ Options SequenceOptions }
-
-func (SetSequenceOptions) isSequenceAlteration() {}
-
-// SequenceOptions represents options on a sequence as part of a CREATE SEQUENCE and ALTER SEQUENCE statement.
-type SequenceOptions struct {
- SequenceKind *string
- SkipRangeMin *int
- SkipRangeMax *int
- StartWithCounter *int
-}
-
-// DropSequence represents a DROP SEQUENCE statement.
-// https://cloud.google.com/spanner/docs/reference/standard-sql/data-definition-language#drop-sequence
-type DropSequence struct {
- Name ID
- IfExists bool
-
- Position Position
-}
-
-func (ds *DropSequence) String() string { return fmt.Sprintf("%#v", ds) }
-func (*DropSequence) isDDLStmt() {}
-func (ds *DropSequence) Pos() Position { return ds.Position }
-func (ds *DropSequence) clearOffset() { ds.Position.Offset = 0 }
diff --git a/vendor/cloud.google.com/go/spanner/statement.go b/vendor/cloud.google.com/go/spanner/statement.go
deleted file mode 100644
index 407036312..000000000
--- a/vendor/cloud.google.com/go/spanner/statement.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "fmt"
-
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/grpc/codes"
- proto3 "google.golang.org/protobuf/types/known/structpb"
- structpb "google.golang.org/protobuf/types/known/structpb"
-)
-
-// A Statement is a SQL query with named parameters.
-//
-// A parameter placeholder consists of '@' followed by the parameter name.
-// The parameter name is an identifier which must conform to the naming
-// requirements in https://cloud.google.com/spanner/docs/lexical#identifiers.
-// Parameters may appear anywhere that a literal value is expected. The same
-// parameter name may be used more than once. It is an error to execute a
-// statement with unbound parameters. On the other hand, it is allowable to
-// bind parameter names that are not used.
-//
-// See the documentation of the Row type for how Go types are mapped to Cloud
-// Spanner types.
-type Statement struct {
- SQL string
- Params map[string]interface{}
-}
-
-// NewStatement returns a Statement with the given SQL and an empty Params map.
-func NewStatement(sql string) Statement {
- return Statement{SQL: sql, Params: map[string]interface{}{}}
-}
-
-// convertParams converts a statement's parameters into proto Param and
-// ParamTypes.
-func (s *Statement) convertParams() (*structpb.Struct, map[string]*sppb.Type, error) {
- params := &proto3.Struct{
- Fields: map[string]*proto3.Value{},
- }
- paramTypes := map[string]*sppb.Type{}
- for k, v := range s.Params {
- val, t, err := encodeValue(v)
- if err != nil {
- return nil, nil, errBindParam(k, v, err)
- }
- params.Fields[k] = val
- if t != nil {
- paramTypes[k] = t
- }
- }
-
- return params, paramTypes, nil
-}
-
-// errBindParam returns error for not being able to bind parameter to query
-// request.
-func errBindParam(k string, v interface{}, err error) error {
- if err == nil {
- return nil
- }
- var se *Error
- if !errorAs(err, &se) {
- return spannerErrorf(codes.InvalidArgument, "failed to bind query parameter(name: %q, value: %v), error = <%v>", k, v, err)
- }
- se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %v)", k, v))
- return se
-}
diff --git a/vendor/cloud.google.com/go/spanner/stats.go b/vendor/cloud.google.com/go/spanner/stats.go
deleted file mode 100644
index bc8176b6d..000000000
--- a/vendor/cloud.google.com/go/spanner/stats.go
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2017 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package spanner
-
-import (
- "context"
- "strconv"
- "strings"
- "sync"
-
- "cloud.google.com/go/spanner/internal"
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
- "google.golang.org/grpc/metadata"
-)
-
-const statsPrefix = "cloud.google.com/go/spanner/"
-
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-var (
- tagKeyClientID = tag.MustNewKey("client_id")
- tagKeyDatabase = tag.MustNewKey("database")
- tagKeyInstance = tag.MustNewKey("instance_id")
- tagKeyLibVersion = tag.MustNewKey("library_version")
- tagKeyType = tag.MustNewKey("type")
- tagKeyIsMultiplexed = tag.MustNewKey("is_multiplexed")
-
- tagCommonKeys = []tag.Key{tagKeyClientID, tagKeyDatabase, tagKeyInstance, tagKeyLibVersion}
-
- tagNumInUseSessions = tag.Tag{Key: tagKeyType, Value: "num_in_use_sessions"}
- tagNumSessions = tag.Tag{Key: tagKeyType, Value: "num_sessions"}
-
- // Deprecated: With InLine Begin transaction client won't maintain separate read or write sessions
- tagNumBeingPrepared = tag.Tag{Key: tagKeyType, Value: "num_sessions_being_prepared"}
- tagNumReadSessions = tag.Tag{Key: tagKeyType, Value: "num_read_sessions"}
- tagNumWriteSessions = tag.Tag{Key: tagKeyType, Value: "num_write_prepared_sessions"}
-
- tagKeyMethod = tag.MustNewKey("grpc_client_method")
- // gfeLatencyMetricsEnabled is used to track if GFELatency and GFEHeaderMissingCount need to be recorded
- gfeLatencyMetricsEnabled = false
- // mutex to avoid data race in reading/writing the above flag
- statsMu = sync.RWMutex{}
-)
-
-func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) {
- stats.Record(ctx, m.M(n))
-}
-
-var (
- // OpenSessionCount is a measure of the number of sessions currently opened.
- // It is EXPERIMENTAL and subject to change or removal without notice.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get open_session_count metrics.
- OpenSessionCount = stats.Int64(
- statsPrefix+"open_session_count",
- "Number of sessions currently opened",
- stats.UnitDimensionless,
- )
-
- // OpenSessionCountView is a view of the last value of OpenSessionCount.
- // It is EXPERIMENTAL and subject to change or removal without notice.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get open_session_count metrics.
- OpenSessionCountView = &view.View{
- Measure: OpenSessionCount,
- Aggregation: view.LastValue(),
- TagKeys: tagCommonKeys,
- }
-
- // MaxAllowedSessionsCount is a measure of the maximum number of sessions
- // allowed. Configurable by the user.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get max_allowed_sessions metrics.
- MaxAllowedSessionsCount = stats.Int64(
- statsPrefix+"max_allowed_sessions",
- "The maximum number of sessions allowed. Configurable by the user.",
- stats.UnitDimensionless,
- )
-
- // MaxAllowedSessionsCountView is a view of the last value of
- // MaxAllowedSessionsCount.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get max_allowed_sessions metrics.
- MaxAllowedSessionsCountView = &view.View{
- Measure: MaxAllowedSessionsCount,
- Aggregation: view.LastValue(),
- TagKeys: tagCommonKeys,
- }
-
- // SessionsCount is a measure of the number of sessions in the pool
- // including both in-use, idle, and being prepared.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get num_sessions_in_pool metrics.
- SessionsCount = stats.Int64(
- statsPrefix+"num_sessions_in_pool",
- "The number of sessions currently in use.",
- stats.UnitDimensionless,
- )
-
- // SessionsCountView is a view of the last value of SessionsCount.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get num_sessions_in_pool metrics.
- SessionsCountView = &view.View{
- Measure: SessionsCount,
- Aggregation: view.LastValue(),
- TagKeys: append(tagCommonKeys, tagKeyType),
- }
-
- // MaxInUseSessionsCount is a measure of the maximum number of sessions
- // in use during the last 10 minute interval.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get max_in_use_sessions metrics.
- MaxInUseSessionsCount = stats.Int64(
- statsPrefix+"max_in_use_sessions",
- "The maximum number of sessions in use during the last 10 minute interval.",
- stats.UnitDimensionless,
- )
-
- // MaxInUseSessionsCountView is a view of the last value of
- // MaxInUseSessionsCount.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get max_in_use_sessions metrics.
- MaxInUseSessionsCountView = &view.View{
- Measure: MaxInUseSessionsCount,
- Aggregation: view.LastValue(),
- TagKeys: tagCommonKeys,
- }
-
- // GetSessionTimeoutsCount is a measure of the number of get sessions
- // timeouts due to pool exhaustion.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get get_session_timeouts metrics.
- GetSessionTimeoutsCount = stats.Int64(
- statsPrefix+"get_session_timeouts",
- "The number of get sessions timeouts due to pool exhaustion.",
- stats.UnitDimensionless,
- )
-
- // GetSessionTimeoutsCountView is a view of the last value of
- // GetSessionTimeoutsCount.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get get_session_timeouts metrics.
- GetSessionTimeoutsCountView = &view.View{
- Measure: GetSessionTimeoutsCount,
- Aggregation: view.Count(),
- TagKeys: tagCommonKeys,
- }
-
- // AcquiredSessionsCount is the number of sessions acquired from
- // the session pool.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get num_acquired_sessions metrics.
- AcquiredSessionsCount = stats.Int64(
- statsPrefix+"num_acquired_sessions",
- "The number of sessions acquired from the session pool.",
- stats.UnitDimensionless,
- )
-
- // AcquiredSessionsCountView is a view of the last value of
- // AcquiredSessionsCount.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get num_acquired_sessions metrics.
- AcquiredSessionsCountView = &view.View{
- Measure: AcquiredSessionsCount,
- Aggregation: view.Count(),
- TagKeys: tagCommonKeys,
- }
-
- // ReleasedSessionsCount is the number of sessions released by the user
- // and pool maintainer.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get num_released_sessions metrics.
- ReleasedSessionsCount = stats.Int64(
- statsPrefix+"num_released_sessions",
- "The number of sessions released by the user and pool maintainer.",
- stats.UnitDimensionless,
- )
-
- // ReleasedSessionsCountView is a view of the last value of
- // ReleasedSessionsCount.
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get num_released_sessions metrics.
- ReleasedSessionsCountView = &view.View{
- Measure: ReleasedSessionsCount,
- Aggregation: view.Count(),
- TagKeys: tagCommonKeys,
- }
-
- // GFELatency is the latency between Google's network receiving an RPC and reading back the first byte of the response
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency metrics.
- GFELatency = stats.Int64(
- statsPrefix+"gfe_latency",
- "Latency between Google's network receiving an RPC and reading back the first byte of the response",
- stats.UnitMilliseconds,
- )
-
- // GFELatencyView is the view of distribution of GFELatency values
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get gfe_latency metrics.
- GFELatencyView = &view.View{
- Name: "cloud.google.com/go/spanner/gfe_latency",
- Measure: GFELatency,
- Description: "Latency between Google's network receives an RPC and reads back the first byte of the response",
- Aggregation: view.Distribution(0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0,
- 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0,
- 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0,
- 100000.0),
- TagKeys: append(tagCommonKeys, tagKeyMethod),
- }
-
- // GFEHeaderMissingCount is the number of RPC responses received without the server-timing header, most likely means that the RPC never reached Google's network
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get gfe_header_missing_count metrics.
- GFEHeaderMissingCount = stats.Int64(
- statsPrefix+"gfe_header_missing_count",
- "Number of RPC responses received without the server-timing header, most likely means that the RPC never reached Google's network",
- stats.UnitDimensionless,
- )
-
- // GFEHeaderMissingCountView is the view of number of GFEHeaderMissingCount
- //
- // Deprecated: OpenCensus project is deprecated. Use OpenTelemetry to get gfe_header_missing_count metrics.
- GFEHeaderMissingCountView = &view.View{
- Name: "cloud.google.com/go/spanner/gfe_header_missing_count",
- Measure: GFEHeaderMissingCount,
- Description: "Number of RPC responses received without the server-timing header, most likely means that the RPC never reached Google's network",
- Aggregation: view.Count(),
- TagKeys: append(tagCommonKeys, tagKeyMethod),
- }
-)
-
-// EnableStatViews enables all views of metrics relate to session management.
-//
-// Deprecated: OpenCensus project is deprecated.
-// Use EnableOpenTelemetryMetrics to get Session metrics through OpenTelemetry instrumentation.
-func EnableStatViews() error {
- return view.Register(
- OpenSessionCountView,
- MaxAllowedSessionsCountView,
- SessionsCountView,
- MaxInUseSessionsCountView,
- GetSessionTimeoutsCountView,
- AcquiredSessionsCountView,
- ReleasedSessionsCountView,
- )
-}
-
-// EnableGfeLatencyView enables GFELatency metric
-//
-// Deprecated: OpenCensus project is deprecated.
-// Use EnableOpenTelemetryMetrics to get GfeLatency metrics through OpenTelemetry instrumentation.
-func EnableGfeLatencyView() error {
- setGFELatencyMetricsFlag(true)
- return view.Register(GFELatencyView)
-}
-
-// EnableGfeHeaderMissingCountView enables GFEHeaderMissingCount metric
-//
-// Deprecated: OpenCensus project is deprecated.
-// Use EnableOpenTelemetryMetrics to get GfeHeaderMissingCount metrics through OpenTelemetry instrumentation.
-func EnableGfeHeaderMissingCountView() error {
- setGFELatencyMetricsFlag(true)
- return view.Register(GFEHeaderMissingCountView)
-}
-
-// EnableGfeLatencyAndHeaderMissingCountViews enables GFEHeaderMissingCount and GFELatency metric
-//
-// Deprecated: OpenCensus project is deprecated.
-// Use EnableOpenTelemetryMetrics to get GfeLatency and GfeHeaderMissingCount metrics through OpenTelemetry instrumentation.
-func EnableGfeLatencyAndHeaderMissingCountViews() error {
- setGFELatencyMetricsFlag(true)
- return view.Register(
- GFELatencyView,
- GFEHeaderMissingCountView,
- )
-}
-
-// Deprecated: OpenCensus project is deprecated.
-func getGFELatencyMetricsFlag() bool {
- statsMu.RLock()
- defer statsMu.RUnlock()
- return gfeLatencyMetricsEnabled
-}
-
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-func setGFELatencyMetricsFlag(enable bool) {
- statsMu.Lock()
- gfeLatencyMetricsEnabled = enable
- statsMu.Unlock()
-}
-
-// DisableGfeLatencyAndHeaderMissingCountViews disables GFEHeaderMissingCount and GFELatency metric
-//
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-func DisableGfeLatencyAndHeaderMissingCountViews() {
- setGFELatencyMetricsFlag(false)
- view.Unregister(
- GFELatencyView,
- GFEHeaderMissingCountView,
- )
-}
-
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-func captureGFELatencyStats(ctx context.Context, md metadata.MD, keyMethod string) error {
- if len(md.Get("server-timing")) == 0 {
- recordStat(ctx, GFEHeaderMissingCount, 1)
- return nil
- }
- serverTiming := md.Get("server-timing")[0]
- gfeLatency, err := strconv.Atoi(strings.TrimPrefix(serverTiming, "gfet4t7; dur="))
- if !strings.HasPrefix(serverTiming, "gfet4t7; dur=") || err != nil {
- return err
- }
- // Record GFE latency with OpenCensus.
- ctx = tag.NewContext(ctx, tag.FromContext(ctx))
- ctx, err = tag.New(ctx, tag.Insert(tagKeyMethod, keyMethod))
- if err != nil {
- return err
- }
- recordStat(ctx, GFELatency, int64(gfeLatency))
- return nil
-}
-
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-func createContextAndCaptureGFELatencyMetrics(ctx context.Context, ct *commonTags, md metadata.MD, keyMethod string) error {
- var ctxGFE, err = tag.New(ctx,
- tag.Upsert(tagKeyClientID, ct.clientID),
- tag.Upsert(tagKeyDatabase, ct.database),
- tag.Upsert(tagKeyInstance, ct.instance),
- tag.Upsert(tagKeyLibVersion, ct.libVersion),
- )
- if err != nil {
- return err
- }
- return captureGFELatencyStats(ctxGFE, md, keyMethod)
-}
-
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-func getCommonTags(sc *sessionClient) *commonTags {
- _, instance, database, err := parseDatabaseName(sc.database)
- if err != nil {
- return nil
- }
- return &commonTags{
- clientID: sc.id,
- database: database,
- instance: instance,
- libVersion: internal.Version,
- }
-}
-
-// commonTags are common key-value pairs of data associated with the GFELatency measure
-// Deprecated: OpenCensus project is deprecated. Use OpenTelemetry for capturing metrics.
-type commonTags struct {
- // Client ID
- clientID string
- // Database Name
- database string
- // Instance ID
- instance string
- // Library Version
- libVersion string
-}
diff --git a/vendor/cloud.google.com/go/spanner/timestampbound.go b/vendor/cloud.google.com/go/spanner/timestampbound.go
deleted file mode 100644
index 00a1396a1..000000000
--- a/vendor/cloud.google.com/go/spanner/timestampbound.go
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "fmt"
- "time"
-
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- pbd "google.golang.org/protobuf/types/known/durationpb"
- pbt "google.golang.org/protobuf/types/known/timestamppb"
-)
-
-// timestampBoundType specifies the timestamp bound mode.
-type timestampBoundType int
-
-const (
- strong timestampBoundType = iota // strong reads
- exactStaleness // read with exact staleness
- maxStaleness // read with max staleness
- minReadTimestamp // read with min freshness
- readTimestamp // read data at exact timestamp
-)
-
-// TimestampBound defines how Cloud Spanner will choose a timestamp for a single
-// read/query or read-only transaction.
-//
-// There are three types of timestamp bound: strong, bounded staleness and exact
-// staleness. Strong is the default.
-//
-// If the Cloud Spanner database to be read is geographically distributed, stale
-// read-only transactions can execute more quickly than strong or read-write
-// transactions, because they are able to execute far from the leader replica.
-//
-// Each type of timestamp bound is discussed in detail below. A TimestampBound
-// can be specified when creating transactions, see the documentation of
-// spanner.Client for an example.
-//
-// # Strong reads
-//
-// Strong reads are guaranteed to see the effects of all transactions that have
-// committed before the start of the read. Furthermore, all rows yielded by a
-// single read are consistent with each other: if any part of the read
-// observes a transaction, all parts of the read see the transaction.
-//
-// Strong reads are not repeatable: two consecutive strong read-only
-// transactions might return inconsistent results if there are concurrent
-// writes. If consistency across reads is required, the reads should be
-// executed within a transaction or at an exact read timestamp.
-//
-// Use StrongRead to create a bound of this type.
-//
-// # Exact staleness
-//
-// An exact staleness timestamp bound executes reads at a user-specified
-// timestamp. Reads at a timestamp are guaranteed to see a consistent prefix of
-// the global transaction history: they observe modifications done by all
-// transactions with a commit timestamp less than or equal to the read
-// timestamp, and observe none of the modifications done by transactions with a
-// larger commit timestamp. They will block until all conflicting transactions
-// that may be assigned commit timestamps less than or equal to the read
-// timestamp have finished.
-//
-// The timestamp can either be expressed as an absolute Cloud Spanner commit
-// timestamp or a staleness relative to the current time.
-//
-// These modes do not require a "negotiation phase" to pick a timestamp. As a
-// result, they execute slightly faster than the equivalent boundedly stale
-// concurrency modes. On the other hand, boundedly stale reads usually return
-// fresher results.
-//
-// Use ReadTimestamp and ExactStaleness to create a bound of this type.
-//
-// # Bounded staleness
-//
-// Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-// subject to a user-provided staleness bound. Cloud Spanner chooses the newest
-// timestamp within the staleness bound that allows execution of the reads at
-// the closest available replica without blocking.
-//
-// All rows yielded are consistent with each other: if any part of the read
-// observes a transaction, all parts of the read see the transaction. Boundedly
-// stale reads are not repeatable: two stale reads, even if they use the same
-// staleness bound, can execute at different timestamps and thus return
-// inconsistent results.
-//
-// Boundedly stale reads execute in two phases. The first phase negotiates a
-// timestamp among all replicas needed to serve the read. In the second phase,
-// reads are executed at the negotiated timestamp.
-//
-// As a result of this two-phase execution, bounded staleness reads are usually
-// a little slower than comparable exact staleness reads. However, they are
-// typically able to return fresher results, and are more likely to execute at
-// the closest replica.
-//
-// Because the timestamp negotiation requires up-front knowledge of which rows
-// will be read, it can only be used with single-use reads and single-use
-// read-only transactions.
-//
-// Use MinReadTimestamp and MaxStaleness to create a bound of this type.
-//
-// # Old read timestamps and garbage collection
-//
-// Cloud Spanner continuously garbage collects deleted and overwritten data in
-// the background to reclaim storage space. This process is known as "version
-// GC". By default, version GC reclaims versions after they are one hour old.
-// Because of this, Cloud Spanner cannot perform reads at read timestamps more
-// than one hour in the past. This restriction also applies to in-progress reads
-// and/or SQL queries whose timestamps become too old while executing. Reads and
-// SQL queries with too-old read timestamps fail with the error
-// ErrorCode.FAILED_PRECONDITION.
-type TimestampBound struct {
- mode timestampBoundType
- d time.Duration
- t time.Time
-}
-
-// StrongRead returns a TimestampBound that will perform reads and queries at a
-// timestamp where all previously committed transactions are visible.
-func StrongRead() TimestampBound {
- return TimestampBound{mode: strong}
-}
-
-// ExactStaleness returns a TimestampBound that will perform reads and queries
-// at an exact staleness.
-func ExactStaleness(d time.Duration) TimestampBound {
- return TimestampBound{
- mode: exactStaleness,
- d: d,
- }
-}
-
-// MaxStaleness returns a TimestampBound that will perform reads and queries at
-// a time chosen to be at most "d" stale.
-func MaxStaleness(d time.Duration) TimestampBound {
- return TimestampBound{
- mode: maxStaleness,
- d: d,
- }
-}
-
-// MinReadTimestamp returns a TimestampBound that bound that will perform reads
-// and queries at a time chosen to be at least "t".
-func MinReadTimestamp(t time.Time) TimestampBound {
- return TimestampBound{
- mode: minReadTimestamp,
- t: t,
- }
-}
-
-// ReadTimestamp returns a TimestampBound that will peform reads and queries at
-// the given time.
-func ReadTimestamp(t time.Time) TimestampBound {
- return TimestampBound{
- mode: readTimestamp,
- t: t,
- }
-}
-
-func (tb TimestampBound) String() string {
- switch tb.mode {
- case strong:
- return fmt.Sprintf("(strong)")
- case exactStaleness:
- return fmt.Sprintf("(exactStaleness: %s)", tb.d)
- case maxStaleness:
- return fmt.Sprintf("(maxStaleness: %s)", tb.d)
- case minReadTimestamp:
- return fmt.Sprintf("(minReadTimestamp: %s)", tb.t)
- case readTimestamp:
- return fmt.Sprintf("(readTimestamp: %s)", tb.t)
- default:
- return fmt.Sprintf("{mode=%v, d=%v, t=%v}", tb.mode, tb.d, tb.t)
- }
-}
-
-// durationProto takes a time.Duration and converts it into pdb.Duration for
-// calling gRPC APIs.
-func durationProto(d time.Duration) *pbd.Duration {
- n := d.Nanoseconds()
- return &pbd.Duration{
- Seconds: n / int64(time.Second),
- Nanos: int32(n % int64(time.Second)),
- }
-}
-
-// timestampProto takes a time.Time and converts it into pbt.Timestamp for
-// calling gRPC APIs.
-func timestampProto(t time.Time) *pbt.Timestamp {
- return &pbt.Timestamp{
- Seconds: t.Unix(),
- Nanos: int32(t.Nanosecond()),
- }
-}
-
-// buildTransactionOptionsReadOnly converts a spanner.TimestampBound into a
-// sppb.TransactionOptions_ReadOnly transaction option, which is then used in
-// transactional reads.
-func buildTransactionOptionsReadOnly(tb TimestampBound, returnReadTimestamp bool) *sppb.TransactionOptions_ReadOnly {
- pb := &sppb.TransactionOptions_ReadOnly{
- ReturnReadTimestamp: returnReadTimestamp,
- }
- switch tb.mode {
- case strong:
- pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_Strong{
- Strong: true,
- }
- case exactStaleness:
- pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ExactStaleness{
- ExactStaleness: durationProto(tb.d),
- }
- case maxStaleness:
- pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MaxStaleness{
- MaxStaleness: durationProto(tb.d),
- }
- case minReadTimestamp:
- pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{
- MinReadTimestamp: timestampProto(tb.t),
- }
- case readTimestamp:
- pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ReadTimestamp{
- ReadTimestamp: timestampProto(tb.t),
- }
- default:
- panic(fmt.Sprintf("buildTransactionOptionsReadOnly(%v,%v)", tb, returnReadTimestamp))
- }
- return pb
-}
diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go
deleted file mode 100644
index f251adca1..000000000
--- a/vendor/cloud.google.com/go/spanner/transaction.go
+++ /dev/null
@@ -1,1956 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "context"
- "sync"
- "sync/atomic"
- "time"
-
- "cloud.google.com/go/internal/trace"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "github.com/googleapis/gax-go/v2"
- "google.golang.org/api/iterator"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/proto"
-
- vkit "cloud.google.com/go/spanner/apiv1"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
-)
-
-// transactionID stores a transaction ID which uniquely identifies a transaction
-// in Cloud Spanner.
-type transactionID []byte
-
-// txReadEnv manages a read-transaction environment consisting of a session
-// handle and a transaction selector.
-type txReadEnv interface {
- // acquire returns a read-transaction environment that can be used to
- // perform a transactional read.
- acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error)
- // getTransactionSelector returns the transaction selector based on state of the transaction it is in
- getTransactionSelector() *sppb.TransactionSelector
- // sets the transactionID
- setTransactionID(id transactionID)
- // sets the transaction's read timestamp
- setTimestamp(time.Time)
- // release should be called at the end of every transactional read to deal
- // with session recycling.
- release(error)
- setSessionEligibilityForLongRunning(sh *sessionHandle)
-}
-
-// txReadOnly contains methods for doing transactional reads.
-type txReadOnly struct {
- // read-transaction environment for performing transactional read
- // operations.
- txReadEnv
-
- // Atomic. Only needed for DML statements, but used forall.
- sequenceNumber int64
-
- // replaceSessionFunc is a function that can be called to replace the
- // session that is used by the transaction. This function should only be
- // defined for single-use transactions that can safely be retried on a
- // different session. All other transactions will set this function to nil.
- replaceSessionFunc func(ctx context.Context) error
-
- // sp is the session pool for allocating a session to execute the read-only
- // transaction. It is set only once during initialization of the
- // txReadOnly.
- sp *sessionPool
- // sh is the sessionHandle allocated from sp.
- sh *sessionHandle
-
- // qo provides options for executing a sql query.
- qo QueryOptions
-
- // ro provides options for reading rows from a database.
- ro ReadOptions
-
- // txOpts provides options for a transaction.
- txOpts TransactionOptions
-
- // commonTags for opencensus metrics
- ct *commonTags
-
- // disableRouteToLeader specifies if all the requests of type read-write and PDML
- // need to be routed to the leader region.
- disableRouteToLeader bool
-
- otConfig *openTelemetryConfig
-}
-
-// TransactionOptions provides options for a transaction.
-type TransactionOptions struct {
- CommitOptions CommitOptions
-
- // The transaction tag to use for a read/write transaction.
- // This tag is automatically included with each statement and the commit
- // request of a read/write transaction.
- TransactionTag string
-
- // CommitPriority is the priority to use for the Commit RPC for the
- // transaction.
- CommitPriority sppb.RequestOptions_Priority
-
- // the transaction lock mode is used to specify a concurrency mode for the
- // read/query operations. It works for a read/write transaction only.
- ReadLockMode sppb.TransactionOptions_ReadWrite_ReadLockMode
-
- // Controls whether to exclude recording modifications in current transaction
- // from the allowed tracking change streams(with DDL option allow_txn_exclusion=true).
- ExcludeTxnFromChangeStreams bool
-}
-
-// merge combines two TransactionOptions that the input parameter will have higher
-// order of precedence.
-func (to TransactionOptions) merge(opts TransactionOptions) TransactionOptions {
- merged := TransactionOptions{
- CommitOptions: to.CommitOptions.merge(opts.CommitOptions),
- TransactionTag: to.TransactionTag,
- CommitPriority: to.CommitPriority,
- ExcludeTxnFromChangeStreams: to.ExcludeTxnFromChangeStreams || opts.ExcludeTxnFromChangeStreams,
- }
- if opts.TransactionTag != "" {
- merged.TransactionTag = opts.TransactionTag
- }
- if opts.CommitPriority != sppb.RequestOptions_PRIORITY_UNSPECIFIED {
- merged.CommitPriority = opts.CommitPriority
- }
- if opts.ReadLockMode != sppb.TransactionOptions_ReadWrite_READ_LOCK_MODE_UNSPECIFIED {
- merged.ReadLockMode = opts.ReadLockMode
- }
- return merged
-}
-
-// errSessionClosed returns error for using a recycled/destroyed session
-func errSessionClosed(sh *sessionHandle) error {
- return spannerErrorf(codes.FailedPrecondition,
- "session is already recycled / destroyed: session_id = %q, rpc_client = %v", sh.getID(), sh.getClient())
-}
-
-// Read returns a RowIterator for reading multiple rows from the database.
-func (t *txReadOnly) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator {
- return t.ReadWithOptions(ctx, table, keys, columns, nil)
-}
-
-// ReadUsingIndex calls ReadWithOptions with ReadOptions{Index: index}.
-func (t *txReadOnly) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) (ri *RowIterator) {
- return t.ReadWithOptions(ctx, table, keys, columns, &ReadOptions{Index: index})
-}
-
-// ReadOptions provides options for reading rows from a database.
-type ReadOptions struct {
- // The index to use for reading. If non-empty, you can only read columns
- // that are part of the index key, part of the primary key, or stored in the
- // index due to a STORING clause in the index definition.
- Index string
-
- // The maximum number of rows to read. A limit value less than 1 means no
- // limit.
- Limit int
-
- // Priority is the RPC priority to use for the operation.
- Priority sppb.RequestOptions_Priority
-
- // The request tag to use for this request.
- RequestTag string
-
- // If this is for a partitioned read and DataBoostEnabled field is set to true, the request will be executed
- // via Spanner independent compute resources. Setting this option for regular read operations has no effect.
- DataBoostEnabled bool
-
- // ReadOptions option used to set the DirectedReadOptions for all ReadRequests which indicate
- // which replicas or regions should be used for running read operations.
- DirectedReadOptions *sppb.DirectedReadOptions
-
- // An option to control the order in which rows are returned from a read.
- OrderBy sppb.ReadRequest_OrderBy
-
- // A lock hint mechanism to use for this request. This setting is only applicable for
- // read-write transaction as as read-only transactions do not take locks.
- LockHint sppb.ReadRequest_LockHint
-}
-
-// merge combines two ReadOptions that the input parameter will have higher
-// order of precedence.
-func (ro ReadOptions) merge(opts ReadOptions) ReadOptions {
- merged := ReadOptions{
- Index: ro.Index,
- Limit: ro.Limit,
- Priority: ro.Priority,
- RequestTag: ro.RequestTag,
- DataBoostEnabled: ro.DataBoostEnabled,
- DirectedReadOptions: ro.DirectedReadOptions,
- OrderBy: ro.OrderBy,
- LockHint: ro.LockHint,
- }
- if opts.Index != "" {
- merged.Index = opts.Index
- }
- if opts.Limit > 0 {
- merged.Limit = opts.Limit
- }
- if opts.Priority != sppb.RequestOptions_PRIORITY_UNSPECIFIED {
- merged.Priority = opts.Priority
- }
- if opts.RequestTag != "" {
- merged.RequestTag = opts.RequestTag
- }
- if opts.DataBoostEnabled {
- merged.DataBoostEnabled = opts.DataBoostEnabled
- }
- if opts.DirectedReadOptions != nil {
- merged.DirectedReadOptions = opts.DirectedReadOptions
- }
- if opts.OrderBy != sppb.ReadRequest_ORDER_BY_UNSPECIFIED {
- merged.OrderBy = opts.OrderBy
- }
- if opts.LockHint != sppb.ReadRequest_LOCK_HINT_UNSPECIFIED {
- merged.LockHint = opts.LockHint
- }
- return merged
-}
-
-// ReadWithOptions returns a RowIterator for reading multiple rows from the
-// database. Pass a ReadOptions to modify the read operation.
-func (t *txReadOnly) ReadWithOptions(ctx context.Context, table string, keys KeySet, columns []string, opts *ReadOptions) (ri *RowIterator) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Read")
- defer func() { trace.EndSpan(ctx, ri.err) }()
- var (
- sh *sessionHandle
- ts *sppb.TransactionSelector
- err error
- )
- kset, err := keys.keySetProto()
- if err != nil {
- return &RowIterator{err: err}
- }
- if sh, ts, err = t.acquire(ctx); err != nil {
- return &RowIterator{err: err}
- }
- // Cloud Spanner will return "Session not found" on bad sessions.
- client := sh.getClient()
- if client == nil {
- // Might happen if transaction is closed in the middle of a API call.
- return &RowIterator{err: errSessionClosed(sh)}
- }
- index := t.ro.Index
- limit := t.ro.Limit
- prio := t.ro.Priority
- requestTag := t.ro.RequestTag
- dataBoostEnabled := t.ro.DataBoostEnabled
- directedReadOptions := t.ro.DirectedReadOptions
- orderBy := t.ro.OrderBy
- lockHint := t.ro.LockHint
- if opts != nil {
- index = opts.Index
- if opts.Limit > 0 {
- limit = opts.Limit
- }
- prio = opts.Priority
- requestTag = opts.RequestTag
- if opts.DataBoostEnabled {
- dataBoostEnabled = opts.DataBoostEnabled
- }
- if opts.DirectedReadOptions != nil {
- directedReadOptions = opts.DirectedReadOptions
- }
- if opts.OrderBy != sppb.ReadRequest_ORDER_BY_UNSPECIFIED {
- orderBy = opts.OrderBy
- }
- if opts.LockHint != sppb.ReadRequest_LOCK_HINT_UNSPECIFIED {
- lockHint = opts.LockHint
- }
-
- }
- var setTransactionID func(transactionID)
- if _, ok := ts.Selector.(*sppb.TransactionSelector_Begin); ok {
- setTransactionID = t.setTransactionID
- } else {
- setTransactionID = nil
- }
- return streamWithReplaceSessionFunc(
- contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader),
- sh.session.logger,
- func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
- if t.sh != nil {
- t.sh.updateLastUseTime()
- }
- client, err := client.StreamingRead(ctx,
- &sppb.ReadRequest{
- Session: t.sh.getID(),
- Transaction: t.getTransactionSelector(),
- Table: table,
- Index: index,
- Columns: columns,
- KeySet: kset,
- ResumeToken: resumeToken,
- Limit: int64(limit),
- RequestOptions: createRequestOptions(prio, requestTag, t.txOpts.TransactionTag),
- DataBoostEnabled: dataBoostEnabled,
- DirectedReadOptions: directedReadOptions,
- OrderBy: orderBy,
- LockHint: lockHint,
- })
- if err != nil {
- if _, ok := t.getTransactionSelector().GetSelector().(*sppb.TransactionSelector_Begin); ok {
- t.setTransactionID(nil)
- return client, errInlineBeginTransactionFailed()
- }
- return client, err
- }
- md, err := client.Header()
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "ReadWithOptions"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "ReadWithOptions", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- return client, err
- },
- t.replaceSessionFunc,
- setTransactionID,
- t.setTimestamp,
- t.release,
- )
-}
-
-// errRowNotFound returns error for not being able to read the row identified by
-// key.
-func errRowNotFound(table string, key Key) error {
- err := spannerErrorf(codes.NotFound, "row not found(Table: %v, PrimaryKey: %v)", table, key)
- err.(*Error).err = ErrRowNotFound
- return err
-}
-
-// errRowNotFoundByIndex returns error for not being able to read the row by index.
-func errRowNotFoundByIndex(table string, key Key, index string) error {
- err := spannerErrorf(codes.NotFound, "row not found(Table: %v, IndexKey: %v, Index: %v)", table, key, index)
- err.(*Error).err = ErrRowNotFound
- return err
-}
-
-// errMultipleRowsFound returns error for receiving more than one row when reading a single row using an index.
-func errMultipleRowsFound(table string, key Key, index string) error {
- return spannerErrorf(codes.FailedPrecondition, "more than one row found by index(Table: %v, IndexKey: %v, Index: %v)", table, key, index)
-}
-
-// errInlineBeginTransactionFailed returns error for read-write transaction to explicitly begin the transaction
-func errInlineBeginTransactionFailed() error {
- return spannerErrorf(codes.Internal, "failed inline begin transaction")
-}
-
-// ReadRow reads a single row from the database.
-//
-// If no row is present with the given key, then ReadRow returns an error(spanner.ErrRowNotFound) where
-// spanner.ErrCode(err) is codes.NotFound.
-//
-// To check if the error is spanner.ErrRowNotFound:
-//
-// if errors.Is(err, spanner.ErrRowNotFound) {
-// ...
-// }
-func (t *txReadOnly) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) {
- return t.ReadRowWithOptions(ctx, table, key, columns, nil)
-}
-
-// ReadRowWithOptions reads a single row from the database. Pass a ReadOptions to modify the read operation.
-//
-// If no row is present with the given key, then ReadRowWithOptions returns an error where
-// spanner.ErrCode(err) is codes.NotFound.
-//
-// To check if the error is spanner.ErrRowNotFound:
-//
-// if errors.Is(err, spanner.ErrRowNotFound) {
-// ...
-// }
-func (t *txReadOnly) ReadRowWithOptions(ctx context.Context, table string, key Key, columns []string, opts *ReadOptions) (*Row, error) {
- iter := t.ReadWithOptions(ctx, table, key, columns, opts)
- defer iter.Stop()
- row, err := iter.Next()
- switch err {
- case iterator.Done:
- return nil, errRowNotFound(table, key)
- case nil:
- return row, nil
- default:
- return nil, err
- }
-}
-
-// ReadRowUsingIndex reads a single row from the database using an index.
-//
-// If no row is present with the given index, then ReadRowUsingIndex returns an
-// error(spanner.ErrRowNotFound) where spanner.ErrCode(err) is codes.NotFound.
-//
-// To check if the error is spanner.ErrRowNotFound:
-//
-// if errors.Is(err, spanner.ErrRowNotFound) {
-// ...
-// }
-//
-// If more than one row received with the given index, then ReadRowUsingIndex
-// returns an error where spanner.ErrCode(err) is codes.FailedPrecondition.
-func (t *txReadOnly) ReadRowUsingIndex(ctx context.Context, table string, index string, key Key, columns []string) (*Row, error) {
- iter := t.ReadUsingIndex(ctx, table, index, key, columns)
- defer iter.Stop()
- row, err := iter.Next()
- switch err {
- case iterator.Done:
- return nil, errRowNotFoundByIndex(table, key, index)
- case nil:
- // If more than one row found, return an error.
- _, err := iter.Next()
- switch err {
- case iterator.Done:
- return row, nil
- case nil:
- return nil, errMultipleRowsFound(table, key, index)
- default:
- return nil, err
- }
- default:
- return nil, err
- }
-}
-
-// QueryOptions provides options for executing a sql query or update statement.
-type QueryOptions struct {
- Mode *sppb.ExecuteSqlRequest_QueryMode
- Options *sppb.ExecuteSqlRequest_QueryOptions
-
- // Priority is the RPC priority to use for the query/update.
- Priority sppb.RequestOptions_Priority
-
- // The request tag to use for this request.
- RequestTag string
-
- // If this is for a partitioned query and DataBoostEnabled field is set to true, the request will be executed
- // via Spanner independent compute resources. Setting this option for regular query operations has no effect.
- DataBoostEnabled bool
-
- // QueryOptions option used to set the DirectedReadOptions for all ExecuteSqlRequests which indicate
- // which replicas or regions should be used for executing queries.
- DirectedReadOptions *sppb.DirectedReadOptions
-
- // Controls whether to exclude recording modifications in current partitioned update operation
- // from the allowed tracking change streams(with DDL option allow_txn_exclusion=true). Setting
- // this value for any sql/dml requests other than partitioned udpate will receive an error.
- ExcludeTxnFromChangeStreams bool
-}
-
-// merge combines two QueryOptions that the input parameter will have higher
-// order of precedence.
-func (qo QueryOptions) merge(opts QueryOptions) QueryOptions {
- merged := QueryOptions{
- Mode: qo.Mode,
- Options: &sppb.ExecuteSqlRequest_QueryOptions{},
- RequestTag: qo.RequestTag,
- Priority: qo.Priority,
- DataBoostEnabled: qo.DataBoostEnabled,
- DirectedReadOptions: qo.DirectedReadOptions,
- ExcludeTxnFromChangeStreams: qo.ExcludeTxnFromChangeStreams || opts.ExcludeTxnFromChangeStreams,
- }
- if opts.Mode != nil {
- merged.Mode = opts.Mode
- }
- if opts.RequestTag != "" {
- merged.RequestTag = opts.RequestTag
- }
- if opts.Priority != sppb.RequestOptions_PRIORITY_UNSPECIFIED {
- merged.Priority = opts.Priority
- }
- if opts.DataBoostEnabled {
- merged.DataBoostEnabled = opts.DataBoostEnabled
- }
- if opts.DirectedReadOptions != nil {
- merged.DirectedReadOptions = opts.DirectedReadOptions
- }
- proto.Merge(merged.Options, qo.Options)
- proto.Merge(merged.Options, opts.Options)
- return merged
-}
-
-func createRequestOptions(prio sppb.RequestOptions_Priority, requestTag, transactionTag string) (ro *sppb.RequestOptions) {
- ro = &sppb.RequestOptions{}
- if prio != sppb.RequestOptions_PRIORITY_UNSPECIFIED {
- ro.Priority = prio
- }
- if requestTag != "" {
- ro.RequestTag = requestTag
- }
- if transactionTag != "" {
- ro.TransactionTag = transactionTag
- }
- return ro
-}
-
-// Query executes a query against the database. It returns a RowIterator for
-// retrieving the resulting rows.
-//
-// Query returns only row data, without a query plan or execution statistics.
-// Use QueryWithStats to get rows along with the plan and statistics. Use
-// AnalyzeQuery to get just the plan.
-func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterator {
- mode := sppb.ExecuteSqlRequest_NORMAL
- return t.query(ctx, statement, QueryOptions{
- Mode: &mode,
- Options: t.qo.Options,
- Priority: t.qo.Priority,
- DirectedReadOptions: t.qo.DirectedReadOptions,
- })
-}
-
-// QueryWithOptions executes a SQL statment against the database. It returns
-// a RowIterator for retrieving the resulting rows. The sql query execution
-// will be optimized based on the given query options.
-func (t *txReadOnly) QueryWithOptions(ctx context.Context, statement Statement, opts QueryOptions) *RowIterator {
- return t.query(ctx, statement, t.qo.merge(opts))
-}
-
-// QueryWithStats executes a SQL statement against the database. It returns
-// a RowIterator for retrieving the resulting rows. The RowIterator will also
-// be populated with a query plan and execution statistics.
-func (t *txReadOnly) QueryWithStats(ctx context.Context, statement Statement) *RowIterator {
- mode := sppb.ExecuteSqlRequest_PROFILE
- return t.query(ctx, statement, QueryOptions{
- Mode: &mode,
- Options: t.qo.Options,
- Priority: t.qo.Priority,
- DirectedReadOptions: t.qo.DirectedReadOptions,
- })
-}
-
-// AnalyzeQuery returns the query plan for statement.
-func (t *txReadOnly) AnalyzeQuery(ctx context.Context, statement Statement) (*sppb.QueryPlan, error) {
- mode := sppb.ExecuteSqlRequest_PLAN
- iter := t.query(ctx, statement, QueryOptions{
- Mode: &mode,
- Options: t.qo.Options,
- Priority: t.qo.Priority,
- DirectedReadOptions: t.qo.DirectedReadOptions,
- })
- defer iter.Stop()
- for {
- _, err := iter.Next()
- if err == iterator.Done {
- break
- }
- if err != nil {
- return nil, err
- }
- }
- if iter.QueryPlan == nil {
- return nil, spannerErrorf(codes.Internal, "query plan unavailable")
- }
- return iter.QueryPlan, nil
-}
-
-func (t *txReadOnly) query(ctx context.Context, statement Statement, options QueryOptions) (ri *RowIterator) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Query")
- defer func() { trace.EndSpan(ctx, ri.err) }()
- req, sh, err := t.prepareExecuteSQL(ctx, statement, options)
- if err != nil {
- return &RowIterator{err: err}
- }
- var setTransactionID func(transactionID)
- if _, ok := req.Transaction.GetSelector().(*sppb.TransactionSelector_Begin); ok {
- setTransactionID = t.setTransactionID
- } else {
- setTransactionID = nil
- }
- client := sh.getClient()
- return streamWithReplaceSessionFunc(
- contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader),
- sh.session.logger,
- func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) {
- req.ResumeToken = resumeToken
- req.Session = t.sh.getID()
- req.Transaction = t.getTransactionSelector()
- t.sh.updateLastUseTime()
-
- client, err := client.ExecuteStreamingSql(ctx, req)
- if err != nil {
- if _, ok := req.Transaction.GetSelector().(*sppb.TransactionSelector_Begin); ok {
- t.setTransactionID(nil)
- return client, errInlineBeginTransactionFailed()
- }
- return client, err
- }
- md, err := client.Header()
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "query"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "query", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- return client, err
- },
- t.replaceSessionFunc,
- setTransactionID,
- t.setTimestamp,
- t.release)
-}
-
-func (t *txReadOnly) prepareExecuteSQL(ctx context.Context, stmt Statement, options QueryOptions) (*sppb.ExecuteSqlRequest, *sessionHandle, error) {
- sh, ts, err := t.acquire(ctx)
- if err != nil {
- return nil, nil, err
- }
- // Cloud Spanner will return "Session not found" on bad sessions.
- sid := sh.getID()
- if sid == "" {
- // Might happen if transaction is closed in the middle of a API call.
- return nil, nil, errSessionClosed(sh)
- }
- params, paramTypes, err := stmt.convertParams()
- if err != nil {
- return nil, nil, err
- }
- mode := sppb.ExecuteSqlRequest_NORMAL
- if options.Mode != nil {
- mode = *options.Mode
- }
- req := &sppb.ExecuteSqlRequest{
- Session: sid,
- Transaction: ts,
- Sql: stmt.SQL,
- QueryMode: mode,
- Seqno: atomic.AddInt64(&t.sequenceNumber, 1),
- Params: params,
- ParamTypes: paramTypes,
- QueryOptions: options.Options,
- RequestOptions: createRequestOptions(options.Priority, options.RequestTag, t.txOpts.TransactionTag),
- DataBoostEnabled: options.DataBoostEnabled,
- DirectedReadOptions: options.DirectedReadOptions,
- }
- return req, sh, nil
-}
-
// txState is the status of a transaction.
type txState int

const (
	// txNew: the transaction is new and waiting to be initialized.
	txNew txState = iota
	// txInit: the transaction is being initialized (e.g. a BeginTransaction
	// RPC or an inlined begin is in flight).
	txInit
	// txActive: the transaction is active and can perform read/write.
	txActive
	// txClosed: the transaction is closed and cannot be used anymore.
	txClosed
)
-
-// errRtsUnavailable returns error for read transaction's read timestamp being
-// unavailable.
-func errRtsUnavailable() error {
- return spannerErrorf(codes.Internal, "read timestamp is unavailable")
-}
-
-// errTxClosed returns error for using a closed transaction.
-func errTxClosed() error {
- return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction")
-}
-
-// errUnexpectedTxState returns error for transaction enters an unexpected state.
-func errUnexpectedTxState(ts txState) error {
- return spannerErrorf(codes.FailedPrecondition, "unexpected transaction state: %v", ts)
-}
-
-// errExcludeRequestLevelDmlFromChangeStreams returns error for passing
-// QueryOptions.ExcludeTxnFromChangeStreams to request-level DML functions. This
-// options should only be used for partitioned update.
-func errExcludeRequestLevelDmlFromChangeStreams() error {
- return spannerErrorf(codes.InvalidArgument, "cannot set exclude transaction from change streams for a request-level DML statement.")
-}
-
// ReadOnlyTransaction provides a snapshot transaction with guaranteed
// consistency across reads, but does not allow writes. Read-only transactions
// can be configured to read at timestamps in the past.
//
// Read-only transactions do not take locks. Instead, they work by choosing a
// Cloud Spanner timestamp, then executing all reads at that timestamp. Since
// they do not acquire locks, they do not block concurrent read-write
// transactions.
//
// Unlike locking read-write transactions, read-only transactions never abort.
// They can fail if the chosen read timestamp is garbage collected; however, the
// default garbage collection policy is generous enough that most applications
// do not need to worry about this in practice. See the documentation of
// TimestampBound for more details.
//
// A ReadOnlyTransaction consumes resources on the server until Close is called.
type ReadOnlyTransaction struct {
	// mu protects concurrent access to the internal states of ReadOnlyTransaction.
	mu sync.Mutex
	// txReadOnly contains methods for performing transactional reads.
	txReadOnly
	// singleUse indicates that the transaction can be used for only one read.
	singleUse bool
	// tx is the transaction ID in Cloud Spanner that uniquely identifies the
	// ReadOnlyTransaction. Guarded by mu.
	tx transactionID
	// txReadyOrClosed is for broadcasting that transaction ID has been returned
	// by Cloud Spanner or that transaction is closed. It is closed and
	// replaced with a fresh channel on each such state change; guarded by mu.
	txReadyOrClosed chan struct{}
	// state is the current transaction status of the ReadOnly transaction.
	// Guarded by mu.
	state txState
	// rts is the read timestamp returned by transactional reads. Set at most
	// once (see setTimestamp); guarded by mu.
	rts time.Time
	// tb is the read staleness bound specification for transactional reads.
	// Only mutable while state == txNew (see WithTimestampBound).
	tb TimestampBound
	// isLongRunningTransaction indicates whether the transaction is long-running or not.
	isLongRunningTransaction bool
}
-
-// errTxInitTimeout returns error for timeout in waiting for initialization of
-// the transaction.
-func errTxInitTimeout() error {
- return spannerErrorf(codes.Canceled, "timeout/context canceled in waiting for transaction's initialization")
-}
-
-// getTimestampBound returns the read staleness bound specified for the
-// ReadOnlyTransaction.
-func (t *ReadOnlyTransaction) getTimestampBound() TimestampBound {
- t.mu.Lock()
- defer t.mu.Unlock()
- return t.tb
-}
-
// begin starts a snapshot read-only Transaction on Cloud Spanner.
//
// It takes a (multiplexed) session, issues a BeginTransaction RPC with the
// configured staleness bound, and on success installs the transaction ID,
// read timestamp and session handle on t, moving the state to txActive.
// Whatever the outcome, waiters on txReadyOrClosed are signaled so they can
// re-examine the transaction state.
func (t *ReadOnlyTransaction) begin(ctx context.Context) error {
	var (
		locked bool
		tx     transactionID
		rts    time.Time
		sh     *sessionHandle
		err    error
		res    *sppb.Transaction
	)
	defer func() {
		if !locked {
			t.mu.Lock()
			// Not necessary, just to make it clear that t.mu is being held when
			// locked == true.
			locked = true
		}
		if t.state != txClosed {
			// Signal other initialization routines.
			close(t.txReadyOrClosed)
			t.txReadyOrClosed = make(chan struct{})
		}
		t.mu.Unlock()
		if err != nil && sh != nil {
			// Got a valid session handle, but failed to initialize the
			// transaction on Cloud Spanner.
			if isSessionNotFoundError(err) {
				sh.destroy()
			}
			// If sh.destroy was already executed, this becomes a noop.
			sh.recycle()
		}
	}()
	// Retry the BeginTransaction call if a 'Session not found' is returned.
	for {
		sh, err = t.sp.takeMultiplexed(ctx)
		if err != nil {
			return err
		}
		t.setSessionEligibilityForLongRunning(sh)
		sh.updateLastUseTime()
		var md metadata.MD
		res, err = sh.getClient().BeginTransaction(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), &sppb.BeginTransactionRequest{
			Session: sh.getID(),
			Options: &sppb.TransactionOptions{
				Mode: &sppb.TransactionOptions_ReadOnly_{
					ReadOnly: buildTransactionOptionsReadOnly(t.getTimestampBound(), true),
				},
			},
		}, gax.WithGRPCOptions(grpc.Header(&md)))

		// Record GFE latency metrics (best effort; failures are only logged).
		if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
			if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "begin_BeginTransaction"); err != nil {
				trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
			}
		}
		if metricErr := recordGFELatencyMetricsOT(ctx, md, "begin_BeginTransaction", t.otConfig); metricErr != nil {
			trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
		}

		if isSessionNotFoundError(err) {
			sh.destroy()
			continue
		} else if err == nil {
			tx = res.Id
			if res.ReadTimestamp != nil {
				rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos))
			}
		} else {
			err = ToSpannerError(err)
		}
		break
	}
	t.mu.Lock()

	// defer function will be executed with t.mu being held.
	locked = true

	// During the execution of t.begin(), t.Close() was invoked.
	if t.state == txClosed {
		return errSessionClosed(sh)
	}

	// If begin() fails, this allows other queries to take over the
	// initialization.
	t.tx = nil
	if err == nil {
		t.tx = tx
		t.rts = rts
		t.sh = sh
		// State transitions to txActive.
		t.state = txActive
	}
	return err
}
-
-// acquire implements txReadEnv.acquire.
-func (t *ReadOnlyTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
- if err := checkNestedTxn(ctx); err != nil {
- return nil, nil, err
- }
- if t.singleUse {
- return t.acquireSingleUse(ctx)
- }
- return t.acquireMultiUse(ctx)
-}
-
-func (t *ReadOnlyTransaction) acquireSingleUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
- t.mu.Lock()
- defer t.mu.Unlock()
- switch t.state {
- case txClosed:
- // A closed single-use transaction can never be reused.
- return nil, nil, errTxClosed()
- case txNew:
- t.state = txClosed
- ts := &sppb.TransactionSelector{
- Selector: &sppb.TransactionSelector_SingleUse{
- SingleUse: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_ReadOnly_{
- ReadOnly: buildTransactionOptionsReadOnly(t.tb, true),
- },
- },
- },
- }
- sh, err := t.sp.takeMultiplexed(ctx)
- if err != nil {
- return nil, nil, err
- }
-
- // Install session handle into t, which can be used for readonly
- // operations later.
- t.sh = sh
- return sh, ts, nil
- }
- us := t.state
-
- // SingleUse transaction should only be in either txNew state or txClosed
- // state.
- return nil, nil, errUnexpectedTxState(us)
-}
-
// acquireMultiUse hands out the session and transaction selector for a
// multi-use read-only transaction, lazily starting the transaction on first
// use. It loops over the transaction state machine: exactly one caller wins
// ownership of initialization (txNew -> txInit -> begin()), while concurrent
// callers block on txReadyOrClosed until the transaction becomes active or
// closed.
func (t *ReadOnlyTransaction) acquireMultiUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
	for {
		t.mu.Lock()
		switch t.state {
		case txClosed:
			t.mu.Unlock()
			return nil, nil, errTxClosed()
		case txNew:
			// State transit to txInit so that no further TimestampBound change
			// is accepted.
			t.state = txInit
			t.mu.Unlock()
			continue
		case txInit:
			if t.tx != nil {
				// A non-nil (empty) t.tx marks that another routine already
				// owns initialization. Wait for a transaction ID to become
				// ready.
				txReadyOrClosed := t.txReadyOrClosed
				t.mu.Unlock()
				select {
				case <-txReadyOrClosed:
					// Need to check transaction state again.
					continue
				case <-ctx.Done():
					// The waiting for initialization is timeout, return error
					// directly.
					return nil, nil, errTxInitTimeout()
				}
			}
			// Take the ownership of initializing the transaction.
			t.tx = transactionID{}
			t.mu.Unlock()
			// Begin a read-only transaction.
			//
			// TODO: consider adding a transaction option which allow queries to
			// initiate transactions by themselves. Note that this option might
			// not be always good because the ID of the new transaction won't
			// be ready till the query returns some data or completes.
			if err := t.begin(ctx); err != nil {
				return nil, nil, err
			}

			// If t.begin() succeeded, t.state should have been changed to
			// txActive, so we can just continue here.
			continue
		case txActive:
			sh := t.sh
			ts := &sppb.TransactionSelector{
				Selector: &sppb.TransactionSelector_Id{
					Id: t.tx,
				},
			}
			t.mu.Unlock()
			return sh, ts, nil
		}
		// Only reached for a state value outside the four known states; the
		// mutex is still held here because no case above matched.
		state := t.state
		t.mu.Unlock()
		return nil, nil, errUnexpectedTxState(state)
	}
}
-
-func (t *ReadOnlyTransaction) getTransactionSelector() *sppb.TransactionSelector {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.singleUse {
- return &sppb.TransactionSelector{
- Selector: &sppb.TransactionSelector_SingleUse{
- SingleUse: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_ReadOnly_{
- ReadOnly: buildTransactionOptionsReadOnly(t.tb, true),
- },
- },
- },
- }
- }
- return &sppb.TransactionSelector{
- Selector: &sppb.TransactionSelector_Id{
- Id: t.tx,
- },
- }
-}
-
-func (t *ReadOnlyTransaction) setTimestamp(ts time.Time) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.rts.IsZero() {
- t.rts = ts
- }
-}
-
-// release implements txReadEnv.release.
-func (t *ReadOnlyTransaction) release(err error) {
- t.mu.Lock()
- sh := t.sh
- t.mu.Unlock()
- if sh != nil { // sh could be nil if t.acquire() fails.
- if isSessionNotFoundError(err) || isClientClosing(err) {
- sh.destroy()
- }
- if t.singleUse {
- // If session handle is already destroyed, this becomes a noop.
- sh.recycle()
- }
- }
-}
-
-// Close closes a ReadOnlyTransaction, the transaction cannot perform any reads
-// after being closed.
-func (t *ReadOnlyTransaction) Close() {
- if t.singleUse {
- return
- }
- t.mu.Lock()
- if t.state != txClosed {
- t.state = txClosed
- close(t.txReadyOrClosed)
- }
- sh := t.sh
- t.mu.Unlock()
- if sh == nil {
- return
- }
- // If session handle is already destroyed, this becomes a noop. If there are
- // still active queries and if the recycled session is reused before they
- // complete, Cloud Spanner will cancel them on behalf of the new transaction
- // on the session.
- if sh != nil {
- sh.recycle()
- }
-}
-
-// Timestamp returns the timestamp chosen to perform reads and queries in this
-// transaction. The value can only be read after some read or query has either
-// returned some data or completed without returning any data.
-func (t *ReadOnlyTransaction) Timestamp() (time.Time, error) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.rts.IsZero() {
- return t.rts, errRtsUnavailable()
- }
- return t.rts, nil
-}
-
-// WithTimestampBound specifies the TimestampBound to use for read or query.
-// This can only be used before the first read or query is invoked. Note:
-// bounded staleness is not available with general ReadOnlyTransactions; use a
-// single-use ReadOnlyTransaction instead.
-//
-// The returned value is the ReadOnlyTransaction so calls can be chained.
-func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.state == txNew {
- // Only allow to set TimestampBound before the first query.
- t.tb = tb
- }
- return t
-}
-
-func (t *ReadOnlyTransaction) setSessionEligibilityForLongRunning(sh *sessionHandle) {
- if t != nil && sh != nil {
- sh.mu.Lock()
- t.mu.Lock()
- sh.eligibleForLongRunning = t.isLongRunningTransaction
- t.mu.Unlock()
- sh.mu.Unlock()
- }
-}
-
// ReadWriteTransaction provides a locking read-write transaction.
//
// This type of transaction is the only way to write data into Cloud Spanner;
// (*Client).Apply, (*Client).ApplyAtLeastOnce, (*Client).PartitionedUpdate use
// transactions internally. These transactions rely on pessimistic locking and,
// if necessary, two-phase commit. Locking read-write transactions may abort,
// requiring the application to retry. However, the interface exposed by
// (*Client).ReadWriteTransaction eliminates the need for applications to write
// retry loops explicitly.
//
// Locking transactions may be used to atomically read-modify-write data
// anywhere in a database. This type of transaction is externally consistent.
//
// Clients should attempt to minimize the amount of time a transaction is
// active. Faster transactions commit with higher probability and cause less
// contention. Cloud Spanner attempts to keep read locks active as long as the
// transaction continues to do reads. Long periods of inactivity at the client
// may cause Cloud Spanner to release a transaction's locks and abort it.
//
// Reads performed within a transaction acquire locks on the data being
// read. Writes can only be done at commit time, after all reads have been
// completed. Conceptually, a read-write transaction consists of zero or more
// reads or SQL queries followed by a commit.
//
// See (*Client).ReadWriteTransaction for an example.
//
// # Semantics
//
// Cloud Spanner can commit the transaction if all read locks it acquired are
// still valid at commit time, and it is able to acquire write locks for all
// writes. Cloud Spanner can abort the transaction for any reason. If a commit
// attempt returns ABORTED, Cloud Spanner guarantees that the transaction has
// not modified any user data in Cloud Spanner.
//
// Unless the transaction commits, Cloud Spanner makes no guarantees about how
// long the transaction's locks were held for. It is an error to use Cloud
// Spanner locks for any sort of mutual exclusion other than between Cloud
// Spanner transactions themselves.
//
// # Aborted transactions
//
// Application code does not need to retry explicitly; (*Client).ReadWriteTransaction
// will automatically retry a transaction if an attempt results in an abort. The lock
// priority of a transaction increases after each prior aborted transaction,
// meaning that the next attempt has a slightly better chance of success than
// before.
//
// Under some circumstances (e.g., many transactions attempting to modify the
// same row(s)), a transaction can abort many times in a short period before
// successfully committing. Thus, it is not a good idea to cap the number of
// retries a transaction can attempt; instead, it is better to limit the total
// amount of wall time spent retrying.
//
// # Idle transactions
//
// A transaction is considered idle if it has no outstanding reads or SQL
// queries and has not started a read or SQL query within the last 10
// seconds. Idle transactions can be aborted by Cloud Spanner so that they don't
// hold on to locks indefinitely. In that case, the commit will fail with error
// ABORTED.
//
// If this behavior is undesirable, periodically executing a simple SQL query
// in the transaction (e.g., SELECT 1) prevents the transaction from becoming
// idle.
type ReadWriteTransaction struct {
	// txReadOnly contains methods for performing transactional reads.
	txReadOnly
	// tx is the transaction ID in Cloud Spanner that uniquely identifies the
	// ReadWriteTransaction. It is set only once in ReadWriteTransaction.begin()
	// during the initialization of ReadWriteTransaction.
	tx transactionID
	// txReadyOrClosed is for broadcasting that transaction ID has been returned
	// by Cloud Spanner or that transaction is closed. It is closed and replaced
	// with a fresh channel on each such state change; guarded by mu.
	txReadyOrClosed chan struct{}
	// mu protects concurrent access to the internal states of
	// ReadWriteTransaction.
	mu sync.Mutex
	// state is the current transaction status of the read-write transaction.
	// Guarded by mu.
	state txState
	// wb is the set of buffered mutations waiting to be committed. Guarded by mu.
	wb []*Mutation
	// isLongRunningTransaction indicates whether the transaction is long-running or not.
	isLongRunningTransaction bool
}
-
-// BufferWrite adds a list of mutations to the set of updates that will be
-// applied when the transaction is committed. It does not actually apply the
-// write until the transaction is committed, so the operation does not block.
-// The effects of the write won't be visible to any reads (including reads done
-// in the same transaction) until the transaction commits.
-//
-// See the example for Client.ReadWriteTransaction.
-func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.state == txClosed {
- return errTxClosed()
- }
- t.wb = append(t.wb, ms...)
- return nil
-}
-
-// Update executes a DML statement against the database. It returns the number
-// of affected rows. Update returns an error if the statement is a query.
-// However, the query is executed, and any data read will be validated upon
-// commit.
-func (t *ReadWriteTransaction) Update(ctx context.Context, stmt Statement) (rowCount int64, err error) {
- mode := sppb.ExecuteSqlRequest_NORMAL
- return t.update(ctx, stmt, QueryOptions{
- Mode: &mode,
- Options: t.qo.Options,
- Priority: t.qo.Priority,
- })
-}
-
-// UpdateWithOptions executes a DML statement against the database. It returns
-// the number of affected rows. The given QueryOptions will be used for the
-// execution of this statement.
-func (t *ReadWriteTransaction) UpdateWithOptions(ctx context.Context, stmt Statement, opts QueryOptions) (rowCount int64, err error) {
- if opts.ExcludeTxnFromChangeStreams {
- return 0, errExcludeRequestLevelDmlFromChangeStreams()
- }
-
- return t.update(ctx, stmt, t.qo.merge(opts))
-}
-
// update executes a single DML statement via ExecuteSql and returns the
// number of affected rows. If the request carried an inlined
// TransactionSelector::begin, the transaction ID returned in the result
// metadata is installed on t; when the server does not return one (or the
// RPC fails), the transaction is closed so the caller retries with an
// explicit BeginTransaction.
func (t *ReadWriteTransaction) update(ctx context.Context, stmt Statement, opts QueryOptions) (rowCount int64, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.Update")
	defer func() { trace.EndSpan(ctx, err) }()
	req, sh, err := t.prepareExecuteSQL(ctx, stmt, opts)
	if err != nil {
		return 0, err
	}
	hasInlineBeginTransaction := false
	if _, ok := req.GetTransaction().GetSelector().(*sppb.TransactionSelector_Begin); ok {
		hasInlineBeginTransaction = true
	}

	sh.updateLastUseTime()
	var md metadata.MD
	resultSet, err := sh.getClient().ExecuteSql(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), req, gax.WithGRPCOptions(grpc.Header(&md)))

	// Record GFE latency metrics (best effort; failures are only logged).
	if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
		if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "update"); err != nil {
			trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
		}
	}
	if metricErr := recordGFELatencyMetricsOT(ctx, md, "update", t.otConfig); metricErr != nil {
		trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
	}
	if err != nil {
		if hasInlineBeginTransaction {
			t.setTransactionID(nil)
			return 0, errInlineBeginTransactionFailed()
		}
		return 0, ToSpannerError(err)
	}
	if hasInlineBeginTransaction {
		if resultSet != nil && resultSet.GetMetadata() != nil && resultSet.GetMetadata().GetTransaction() != nil &&
			resultSet.GetMetadata().GetTransaction().GetId() != nil {
			t.setTransactionID(resultSet.GetMetadata().GetTransaction().GetId())
		} else {
			// retry with explicit begin transaction
			t.setTransactionID(nil)
			return 0, errInlineBeginTransactionFailed()
		}
	}
	// A result set without stats means the statement was a query, which is
	// not allowed through Update.
	if resultSet.Stats == nil {
		return 0, spannerErrorf(codes.InvalidArgument, "query passed to Update: %q", stmt.SQL)
	}

	return extractRowCount(resultSet.Stats)
}
-
-// BatchUpdate groups one or more DML statements and sends them to Spanner in a
-// single RPC. This is an efficient way to execute multiple DML statements.
-//
-// A slice of counts is returned, where each count represents the number of
-// affected rows for the given query at the same index. If an error occurs,
-// counts will be returned up to the query that encountered the error.
-func (t *ReadWriteTransaction) BatchUpdate(ctx context.Context, stmts []Statement) (_ []int64, err error) {
- return t.BatchUpdateWithOptions(ctx, stmts, QueryOptions{})
-}
-
-// BatchUpdateWithOptions groups one or more DML statements and sends them to
-// Spanner in a single RPC. This is an efficient way to execute multiple DML
-// statements.
-//
-// A slice of counts is returned, where each count represents the number of
-// affected rows for the given query at the same index. If an error occurs,
-// counts will be returned up to the query that encountered the error.
-//
-// The request tag and priority given in the QueryOptions are included with the
-// RPC. Any other options that are set in the QueryOptions struct are ignored.
-func (t *ReadWriteTransaction) BatchUpdateWithOptions(ctx context.Context, stmts []Statement, opts QueryOptions) (_ []int64, err error) {
- if opts.ExcludeTxnFromChangeStreams {
- return nil, errExcludeRequestLevelDmlFromChangeStreams()
- }
- return t.batchUpdateWithOptions(ctx, stmts, t.qo.merge(opts))
-}
-
// batchUpdateWithOptions executes a group of DML statements via a single
// ExecuteBatchDml RPC and returns the affected-row count per statement. If
// the request carried an inlined TransactionSelector::begin, the transaction
// ID from the first result set's metadata is installed on t; if none is
// returned (or the RPC fails), the transaction is closed so the caller
// retries with an explicit BeginTransaction.
func (t *ReadWriteTransaction) batchUpdateWithOptions(ctx context.Context, stmts []Statement, opts QueryOptions) (_ []int64, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/spanner.BatchUpdate")
	defer func() { trace.EndSpan(ctx, err) }()

	sh, ts, err := t.acquire(ctx)
	if err != nil {
		return nil, err
	}

	// Cloud Spanner will return "Session not found" on bad sessions.
	sid := sh.getID()
	if sid == "" {
		// Might happen if transaction is closed in the middle of a API call.
		return nil, errSessionClosed(sh)
	}

	// mark transaction and session to be eligible for long-running
	t.mu.Lock()
	t.isLongRunningTransaction = true
	t.mu.Unlock()
	t.setSessionEligibilityForLongRunning(sh)

	// Convert each statement (SQL text plus bound parameters) to its proto form.
	var sppbStmts []*sppb.ExecuteBatchDmlRequest_Statement
	for _, st := range stmts {
		params, paramTypes, err := st.convertParams()
		if err != nil {
			return nil, err
		}
		sppbStmts = append(sppbStmts, &sppb.ExecuteBatchDmlRequest_Statement{
			Sql:        st.SQL,
			Params:     params,
			ParamTypes: paramTypes,
		})
	}

	hasInlineBeginTransaction := false
	if _, ok := ts.GetSelector().(*sppb.TransactionSelector_Begin); ok {
		hasInlineBeginTransaction = true
	}

	sh.updateLastUseTime()
	var md metadata.MD
	resp, err := sh.getClient().ExecuteBatchDml(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), &sppb.ExecuteBatchDmlRequest{
		Session:        sh.getID(),
		Transaction:    ts,
		Statements:     sppbStmts,
		Seqno:          atomic.AddInt64(&t.sequenceNumber, 1),
		RequestOptions: createRequestOptions(opts.Priority, opts.RequestTag, t.txOpts.TransactionTag),
	}, gax.WithGRPCOptions(grpc.Header(&md)))

	// Record GFE latency metrics (best effort; failures are only logged).
	if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
		if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "batchUpdateWithOptions"); err != nil {
			trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", ToSpannerError(err))
		}
	}
	if metricErr := recordGFELatencyMetricsOT(ctx, md, "batchUpdateWithOptions", t.otConfig); metricErr != nil {
		trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
	}
	if err != nil {
		if hasInlineBeginTransaction {
			t.setTransactionID(nil)
			return nil, errInlineBeginTransactionFailed()
		}
		return nil, ToSpannerError(err)
	}

	// Collect the per-statement row counts; the first result set that carries
	// transaction metadata supplies the inlined-begin transaction ID.
	haveTransactionID := false
	var counts []int64
	for _, rs := range resp.ResultSets {
		if hasInlineBeginTransaction && !haveTransactionID && rs != nil && rs.GetMetadata() != nil &&
			rs.GetMetadata().GetTransaction() != nil && rs.GetMetadata().GetTransaction().GetId() != nil {
			t.setTransactionID(rs.GetMetadata().GetTransaction().GetId())
			haveTransactionID = true
		}
		count, err := extractRowCount(rs.Stats)
		if err != nil {
			return nil, err
		}
		counts = append(counts, count)
	}
	if hasInlineBeginTransaction && !haveTransactionID {
		// retry with explicit BeginTransaction
		t.setTransactionID(nil)
		return counts, errInlineBeginTransactionFailed()
	}
	// A non-OK batch status means some statement after the returned counts
	// failed; surface it alongside the partial counts.
	if resp.Status != nil && resp.Status.Code != 0 {
		return counts, spannerErrorf(codes.Code(uint32(resp.Status.Code)), resp.Status.Message)
	}
	return counts, nil
}
-
// acquire implements txReadEnv.acquire.
// This will make sure that only one operation will be running with TransactionSelector::begin option
// in a ReadWriteTransaction by changing the state to init, all other operations will wait for state
// to become active/closed. If state is active transactionID is already set, if closed returns error.
func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) {
	for {
		t.mu.Lock()
		switch t.state {
		case txClosed:
			// A closed transaction with no ID means the inlined begin failed;
			// distinguish that so the caller retries with an explicit begin.
			if t.tx == nil {
				t.mu.Unlock()
				return nil, nil, errInlineBeginTransactionFailed()
			}
			t.mu.Unlock()
			return nil, nil, errTxClosed()
		case txNew:
			// State transit to txInit so that only one TransactionSelector::begin
			// is accepted.
			t.state = txInit
			sh := t.sh
			ts := &sppb.TransactionSelector{
				Selector: &sppb.TransactionSelector_Begin{
					Begin: &sppb.TransactionOptions{
						Mode: &sppb.TransactionOptions_ReadWrite_{
							ReadWrite: &sppb.TransactionOptions_ReadWrite{},
						},
						ExcludeTxnFromChangeStreams: t.txOpts.ExcludeTxnFromChangeStreams,
					},
				},
			}
			t.mu.Unlock()
			return sh, ts, nil
		case txInit:
			if t.tx == nil {
				// Wait for a transaction ID to become ready.
				txReadyOrClosed := t.txReadyOrClosed
				t.mu.Unlock()
				select {
				case <-txReadyOrClosed:
					// Need to check transaction state again.
					continue
				case <-ctx.Done():
					// The waiting for initialization is timeout, return error
					// directly.
					return nil, nil, errTxInitTimeout()
				}
			}
			t.mu.Unlock()
			// If first statement with TransactionSelector::begin succeeded, t.state should have been changed to
			// txActive, so we can just continue here.
			continue
		case txActive:
			// Transaction already established: select it by ID.
			sh := t.sh
			ts := &sppb.TransactionSelector{
				Selector: &sppb.TransactionSelector_Id{
					Id: t.tx,
				},
			}
			t.mu.Unlock()
			return sh, ts, nil
		default:
			// Unknown state value: fail rather than loop forever.
			state := t.state
			t.mu.Unlock()
			return nil, nil, errUnexpectedTxState(state)
		}
	}
}
-
-func (t *ReadWriteTransaction) getTransactionSelector() *sppb.TransactionSelector {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.state == txActive {
- return &sppb.TransactionSelector{
- Selector: &sppb.TransactionSelector_Id{
- Id: t.tx,
- },
- }
- }
- return &sppb.TransactionSelector{
- Selector: &sppb.TransactionSelector_Begin{
- Begin: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_ReadWrite_{
- ReadWrite: &sppb.TransactionOptions_ReadWrite{
- ReadLockMode: t.txOpts.ReadLockMode,
- },
- },
- ExcludeTxnFromChangeStreams: t.txOpts.ExcludeTxnFromChangeStreams,
- },
- },
- }
-}
-
-func (t *ReadWriteTransaction) setTransactionID(tx transactionID) {
- t.mu.Lock()
- defer t.mu.Unlock()
- // When inline begin transaction fails close the transaction to retry with explicit begin transaction
- if tx == nil {
- t.state = txClosed
- // unblock other waiting operations to abort and retry with explicit begin transaction.
- close(t.txReadyOrClosed)
- t.txReadyOrClosed = make(chan struct{})
- return
- }
- t.tx = tx
- t.state = txActive
- close(t.txReadyOrClosed)
- t.txReadyOrClosed = make(chan struct{})
-}
-
-// release implements txReadEnv.release.
-func (t *ReadWriteTransaction) release(err error) {
- t.mu.Lock()
- sh := t.sh
- state := t.state
- t.mu.Unlock()
- if sh != nil && isSessionNotFoundError(err) {
- sh.destroy()
- }
- // if transaction is released during initialization then do explicit begin transaction
- if state == txInit {
- t.setTransactionID(nil)
- }
-}
-
-func (t *ReadWriteTransaction) setSessionEligibilityForLongRunning(sh *sessionHandle) {
- if t != nil && sh != nil {
- sh.mu.Lock()
- t.mu.Lock()
- sh.eligibleForLongRunning = t.isLongRunningTransaction
- t.mu.Unlock()
- sh.mu.Unlock()
- }
-}
-
-func beginTransaction(ctx context.Context, sid string, client *vkit.Client, opts TransactionOptions) (transactionID, error) {
- res, err := client.BeginTransaction(ctx, &sppb.BeginTransactionRequest{
- Session: sid,
- Options: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_ReadWrite_{
- ReadWrite: &sppb.TransactionOptions_ReadWrite{
- ReadLockMode: opts.ReadLockMode,
- },
- },
- ExcludeTxnFromChangeStreams: opts.ExcludeTxnFromChangeStreams,
- },
- })
- if err != nil {
- return nil, err
- }
- if res.Id == nil {
- return nil, spannerErrorf(codes.Unknown, "BeginTransaction returned a transaction with a nil ID.")
- }
- return res.Id, nil
-}
-
-// shouldExplicitBegin checks if ReadWriteTransaction should do an explicit BeginTransaction
-func (t *ReadWriteTransaction) shouldExplicitBegin(attempt int) bool {
- // don't begin during the first attempt
- if attempt == 0 {
- return false
- }
- t.mu.Lock()
- defer t.mu.Unlock()
- // don't begin if transactionId is already set
- if t == nil || t.tx != nil || t.state == txNew {
- return false
- }
- return true
-}
-
-// begin starts a read-write transaction on Cloud Spanner.
-func (t *ReadWriteTransaction) begin(ctx context.Context) error {
- t.mu.Lock()
- if t.tx != nil {
- t.state = txActive
- return nil
- }
- sh := t.sh
- t.mu.Unlock()
-
- var (
- tx transactionID
- err error
- )
- defer func() {
- if err != nil && sh != nil {
- // Got a valid session handle, but failed to initialize transaction=
- // on Cloud Spanner.
- if isSessionNotFoundError(err) {
- sh.destroy()
- }
- // If sh.destroy was already executed, this becomes a noop.
- sh.recycle()
- }
- }()
- // Retry the BeginTransaction call if a 'Session not found' is returned.
- for {
- if sh != nil {
- sh.updateLastUseTime()
- }
- tx, err = beginTransaction(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), sh.getID(), sh.getClient(), t.txOpts)
- if isSessionNotFoundError(err) {
- sh.destroy()
- sh, err = t.sp.take(ctx)
- if err != nil {
- return err
- }
- // Some operations (for ex BatchUpdate) can be long-running. For such operations set the isLongRunningTransaction flag to be true
- t.setSessionEligibilityForLongRunning(sh)
- continue
- } else {
- err = ToSpannerError(err)
- }
- break
- }
- if err == nil {
- t.mu.Lock()
- t.tx = tx
- t.sh = sh
- // Transition state to txActive.
- t.state = txActive
- t.mu.Unlock()
- }
- return err
-}
-
// CommitResponse provides a response of a transaction commit in a database.
type CommitResponse struct {
	// CommitTs is the commit time for a transaction.
	CommitTs time.Time
	// CommitStats is the commit statistics for a transaction. Only populated
	// when the commit was requested with CommitOptions.ReturnCommitStats.
	CommitStats *sppb.CommitResponse_CommitStats
}
-
// CommitOptions provides options for committing a transaction in a database.
type CommitOptions struct {
	// ReturnCommitStats requests commit statistics in the commit response.
	ReturnCommitStats bool
	// MaxCommitDelay is forwarded to the CommitRequest when non-nil; a nil
	// value leaves the server default in place. NOTE(review): presumably the
	// maximum delay the server may apply to the commit — confirm against the
	// Spanner Commit API documentation.
	MaxCommitDelay *time.Duration
}
-
-// merge combines two CommitOptions that the input parameter will have higher
-// order of precedence.
-func (co CommitOptions) merge(opts CommitOptions) CommitOptions {
- var newOpts CommitOptions
- newOpts = CommitOptions{
- ReturnCommitStats: co.ReturnCommitStats || opts.ReturnCommitStats,
- MaxCommitDelay: opts.MaxCommitDelay,
- }
-
- if newOpts.MaxCommitDelay == nil {
- newOpts.MaxCommitDelay = co.MaxCommitDelay
- }
- return newOpts
-}
-
-// commit tries to commit a readwrite transaction to Cloud Spanner. It also
-// returns the commit response for the transactions.
-func (t *ReadWriteTransaction) commit(ctx context.Context, options CommitOptions) (CommitResponse, error) {
- resp := CommitResponse{}
- t.mu.Lock()
- if t.tx == nil {
- if t.state == txClosed {
- // inline begin transaction failed
- t.mu.Unlock()
- return resp, errInlineBeginTransactionFailed()
- }
- t.mu.Unlock()
- // mutations or empty transaction body only
- if err := t.begin(ctx); err != nil {
- return resp, err
- }
- t.mu.Lock()
- }
- t.state = txClosed // No further operations after commit.
- close(t.txReadyOrClosed)
- mPb, err := mutationsProto(t.wb)
-
- t.mu.Unlock()
- if err != nil {
- return resp, err
- }
-
- // In case that sessionHandle was destroyed but transaction body fails to
- // report it.
- sid, client := t.sh.getID(), t.sh.getClient()
- if sid == "" || client == nil {
- return resp, errSessionClosed(t.sh)
- }
- t.sh.updateLastUseTime()
-
- var md metadata.MD
- var maxCommitDelay *durationpb.Duration
- if options.MaxCommitDelay != nil {
- maxCommitDelay = durationpb.New(*(options.MaxCommitDelay))
- }
- res, e := client.Commit(contextWithOutgoingMetadata(ctx, t.sh.getMetadata(), t.disableRouteToLeader), &sppb.CommitRequest{
- Session: sid,
- Transaction: &sppb.CommitRequest_TransactionId{
- TransactionId: t.tx,
- },
- RequestOptions: createRequestOptions(t.txOpts.CommitPriority, "", t.txOpts.TransactionTag),
- Mutations: mPb,
- ReturnCommitStats: options.ReturnCommitStats,
- MaxCommitDelay: maxCommitDelay,
- }, gax.WithGRPCOptions(grpc.Header(&md)))
- if getGFELatencyMetricsFlag() && md != nil && t.ct != nil {
- if err := createContextAndCaptureGFELatencyMetrics(ctx, t.ct, md, "commit"); err != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency. Try disabling and rerunning. Error: %v", err)
- }
- }
- if metricErr := recordGFELatencyMetricsOT(ctx, md, "commit", t.otConfig); metricErr != nil {
- trace.TracePrintf(ctx, nil, "Error in recording GFE Latency through OpenTelemetry. Error: %v", metricErr)
- }
- if e != nil {
- return resp, toSpannerErrorWithCommitInfo(e, true)
- }
- if tstamp := res.GetCommitTimestamp(); tstamp != nil {
- resp.CommitTs = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
- }
- if options.ReturnCommitStats {
- resp.CommitStats = res.CommitStats
- }
- if isSessionNotFoundError(err) {
- t.sh.destroy()
- }
- return resp, err
-}
-
-// rollback is called when a commit is aborted or the transaction body runs
-// into error.
-func (t *ReadWriteTransaction) rollback(ctx context.Context) {
- t.mu.Lock()
- // Forbid further operations on rollbacked transaction.
- t.state = txClosed
- if t.tx == nil {
- t.mu.Unlock()
- return
- }
- t.mu.Unlock()
- // In case that sessionHandle was destroyed but transaction body fails to
- // report it.
- sid, client := t.sh.getID(), t.sh.getClient()
- if sid == "" || client == nil {
- return
- }
- t.sh.updateLastUseTime()
- err := client.Rollback(contextWithOutgoingMetadata(ctx, t.sh.getMetadata(), t.disableRouteToLeader), &sppb.RollbackRequest{
- Session: sid,
- TransactionId: t.tx,
- })
- if isSessionNotFoundError(err) {
- t.sh.destroy()
- }
-}
-
-// runInTransaction executes f under a read-write transaction context.
-func (t *ReadWriteTransaction) runInTransaction(ctx context.Context, f func(context.Context, *ReadWriteTransaction) error) (CommitResponse, error) {
- var (
- resp CommitResponse
- err error
- errDuringCommit bool
- )
- if err = f(context.WithValue(ctx, transactionInProgressKey{}, 1), t); err == nil {
- // Try to commit if transaction body returns no error.
- resp, err = t.commit(ctx, t.txOpts.CommitOptions)
- errDuringCommit = err != nil
- }
- if err != nil {
- if isAbortedErr(err) {
- // Retry the transaction using the same session on ABORT error.
- // Cloud Spanner will create the new transaction with the previous
- // one's wound-wait priority.
- return resp, err
- }
- if isSessionNotFoundError(err) {
- t.sh.destroy()
- return resp, err
- }
- if isFailedInlineBeginTransaction(err) {
- return resp, err
- }
-
- // Rollback the transaction unless the error occurred during the
- // commit. Executing a rollback after a commit has failed will
- // otherwise cause an error. Note that transient errors, such as
- // UNAVAILABLE, are already handled in the gRPC layer and do not show
- // up here. Context errors (deadline exceeded / canceled) during
- // commits are also not rolled back.
- if !errDuringCommit {
- t.rollback(ctx)
- }
- return resp, err
- }
- // err == nil, return commit response.
- return resp, nil
-}
-
-// ReadWriteStmtBasedTransaction provides a wrapper of ReadWriteTransaction in
-// order to run a read-write transaction in a statement-based way.
-//
-// This struct is returned by NewReadWriteStmtBasedTransaction and contains
-// Commit() and Rollback() methods to end a transaction.
-type ReadWriteStmtBasedTransaction struct {
- // ReadWriteTransaction contains methods for performing transactional reads.
- ReadWriteTransaction
-
- options TransactionOptions
-}
-
-// NewReadWriteStmtBasedTransaction starts a read-write transaction. Commit() or
-// Rollback() must be called to end a transaction. If Commit() or Rollback() is
-// not called, the session that is used by the transaction will not be returned
-// to the pool and cause a session leak.
-//
-// This method should only be used when manual error handling and retry
-// management is needed. Cloud Spanner may abort a read/write transaction at any
-// moment, and each statement that is executed on the transaction should be
-// checked for an Aborted error, including queries and read operations.
-//
-// For most use cases, client.ReadWriteTransaction should be used, as it will
-// handle all Aborted and 'Session not found' errors automatically.
-func NewReadWriteStmtBasedTransaction(ctx context.Context, c *Client) (*ReadWriteStmtBasedTransaction, error) {
- return NewReadWriteStmtBasedTransactionWithOptions(ctx, c, TransactionOptions{})
-}
-
-// NewReadWriteStmtBasedTransactionWithOptions starts a read-write transaction
-// with configurable options. Commit() or Rollback() must be called to end a
-// transaction. If Commit() or Rollback() is not called, the session that is
-// used by the transaction will not be returned to the pool and cause a session
-// leak.
-//
-// NewReadWriteStmtBasedTransactionWithOptions is a configurable version of
-// NewReadWriteStmtBasedTransaction.
-func NewReadWriteStmtBasedTransactionWithOptions(ctx context.Context, c *Client, options TransactionOptions) (*ReadWriteStmtBasedTransaction, error) {
- var (
- sh *sessionHandle
- err error
- t *ReadWriteStmtBasedTransaction
- )
- sh, err = c.idleSessions.take(ctx)
- if err != nil {
- // If session retrieval fails, just fail the transaction.
- return nil, err
- }
- t = &ReadWriteStmtBasedTransaction{
- ReadWriteTransaction: ReadWriteTransaction{
- txReadyOrClosed: make(chan struct{}),
- },
- }
- t.txReadOnly.sp = c.idleSessions
- t.txReadOnly.sh = sh
- t.txReadOnly.txReadEnv = t
- t.txReadOnly.qo = c.qo
- t.txReadOnly.ro = c.ro
- t.txReadOnly.disableRouteToLeader = c.disableRouteToLeader
- t.txOpts = c.txo.merge(options)
- t.ct = c.ct
- t.otConfig = c.otConfig
-
- // always explicit begin the transactions
- if err = t.begin(ctx); err != nil {
- if sh != nil {
- sh.recycle()
- }
- return nil, err
- }
- return t, err
-}
-
-// Commit tries to commit a readwrite transaction to Cloud Spanner. It also
-// returns the commit timestamp for the transactions.
-func (t *ReadWriteStmtBasedTransaction) Commit(ctx context.Context) (time.Time, error) {
- resp, err := t.CommitWithReturnResp(ctx)
- return resp.CommitTs, err
-}
-
-// CommitWithReturnResp tries to commit a readwrite transaction. It also returns
-// the commit timestamp and stats for the transactions.
-func (t *ReadWriteStmtBasedTransaction) CommitWithReturnResp(ctx context.Context) (CommitResponse, error) {
- resp, err := t.commit(ctx, t.txOpts.CommitOptions)
- // Rolling back an aborted transaction is not necessary.
- if err != nil && status.Code(err) != codes.Aborted {
- t.rollback(ctx)
- }
- if t.sh != nil {
- t.sh.recycle()
- }
- return resp, err
-}
-
-// Rollback is called to cancel the ongoing transaction that has not been
-// committed yet.
-func (t *ReadWriteStmtBasedTransaction) Rollback(ctx context.Context) {
- t.rollback(ctx)
- if t.sh != nil {
- t.sh.recycle()
- }
-}
-
-// writeOnlyTransaction provides the most efficient way of doing write-only
-// transactions. It essentially does blind writes to Cloud Spanner.
-type writeOnlyTransaction struct {
- // sp is the session pool which writeOnlyTransaction uses to get Cloud
- // Spanner sessions for blind writes.
- sp *sessionPool
- // transactionTag is the tag that will be included with the CommitRequest
- // of the write-only transaction.
- transactionTag string
- // commitPriority is the RPC priority to use for the commit operation.
- commitPriority sppb.RequestOptions_Priority
- // disableRouteToLeader specifies if we want to disable RW/PDML requests to be routed to leader.
- disableRouteToLeader bool
- // ExcludeTxnFromChangeStreams controls whether to exclude recording modifications in
- // current transaction from the allowed tracking change streams with DDL option
- // allow_txn_exclusion=true.
- excludeTxnFromChangeStreams bool
- // commitOptions are applied to the Commit request for the writeOnlyTransaction..
- commitOptions CommitOptions
-}
-
-// applyAtLeastOnce commits a list of mutations to Cloud Spanner at least once,
-// unless one of the following happens:
-//
-// 1. Context times out.
-// 2. An unretryable error (e.g. database not found) occurs.
-// 3. There is a malformed Mutation object.
-func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) {
- var (
- ts time.Time
- sh *sessionHandle
- )
- defer func() {
- if sh != nil {
- sh.recycle()
- }
- }()
- mPb, err := mutationsProto(ms)
- if err != nil {
- // Malformed mutation found, just return the error.
- return ts, err
- }
-
- var maxCommitDelay *durationpb.Duration
- if t.commitOptions.MaxCommitDelay != nil {
- maxCommitDelay = durationpb.New(*(t.commitOptions.MaxCommitDelay))
- }
-
- // Make a retryer for Aborted and certain Internal errors.
- retryer := onCodes(DefaultRetryBackoff, codes.Aborted, codes.Internal)
- // Apply the mutation and retry if the commit is aborted.
- applyMutationWithRetry := func(ctx context.Context) error {
- for {
- if sh == nil || sh.getID() == "" || sh.getClient() == nil {
- // No usable session for doing the commit, take one from pool.
- sh, err = t.sp.takeMultiplexed(ctx)
- if err != nil {
- // sessionPool.Take already retries for session
- // creations/retrivals.
- return ToSpannerError(err)
- }
- }
- sh.updateLastUseTime()
- res, err := sh.getClient().Commit(contextWithOutgoingMetadata(ctx, sh.getMetadata(), t.disableRouteToLeader), &sppb.CommitRequest{
- Session: sh.getID(),
- Transaction: &sppb.CommitRequest_SingleUseTransaction{
- SingleUseTransaction: &sppb.TransactionOptions{
- Mode: &sppb.TransactionOptions_ReadWrite_{
- ReadWrite: &sppb.TransactionOptions_ReadWrite{},
- },
- ExcludeTxnFromChangeStreams: t.excludeTxnFromChangeStreams,
- },
- },
- Mutations: mPb,
- RequestOptions: createRequestOptions(t.commitPriority, "", t.transactionTag),
- MaxCommitDelay: maxCommitDelay,
- })
- if err != nil && !isAbortedErr(err) {
- // should not be the case with multiplexed sessions
- if isSessionNotFoundError(err) {
- // Discard the bad session.
- sh.destroy()
- }
- return toSpannerErrorWithCommitInfo(err, true)
- } else if err == nil {
- if tstamp := res.GetCommitTimestamp(); tstamp != nil {
- ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
- }
- }
- delay, shouldRetry := retryer.Retry(err)
- if !shouldRetry {
- return err
- }
- if err := gax.Sleep(ctx, delay); err != nil {
- return err
- }
- }
- }
- return ts, applyMutationWithRetry(ctx)
-}
-
-// isAbortedErr returns true if the error indicates that an gRPC call is
-// aborted on the server side.
-func isAbortedErr(err error) bool {
- if err == nil {
- return false
- }
- if ErrCode(err) == codes.Aborted {
- return true
- }
- return false
-}
diff --git a/vendor/cloud.google.com/go/spanner/value.go b/vendor/cloud.google.com/go/spanner/value.go
deleted file mode 100644
index b25867874..000000000
--- a/vendor/cloud.google.com/go/spanner/value.go
+++ /dev/null
@@ -1,4934 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package spanner
-
-import (
- "bytes"
- "database/sql"
- "database/sql/driver"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "math"
- "math/big"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "cloud.google.com/go/civil"
- "cloud.google.com/go/internal/fields"
- sppb "cloud.google.com/go/spanner/apiv1/spannerpb"
- "google.golang.org/grpc/codes"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/protoadapt"
- "google.golang.org/protobuf/reflect/protoreflect"
- proto3 "google.golang.org/protobuf/types/known/structpb"
-)
-
-const (
- // nullString is returned by the String methods of NullableValues when the
- // underlying database value is null.
- nullString = "<null>"
- commitTimestampPlaceholderString = "spanner.commit_timestamp()"
-
- // NumericPrecisionDigits is the maximum number of digits in a NUMERIC
- // value.
- NumericPrecisionDigits = 38
-
- // NumericScaleDigits is the maximum number of digits after the decimal
- // point in a NUMERIC value.
- NumericScaleDigits = 9
-)
-
-// LossOfPrecisionHandlingOption describes the option to deal with loss of
-// precision on numeric values.
-type LossOfPrecisionHandlingOption int
-
-const (
- // NumericRound automatically rounds a numeric value that has a higher
- // precision than what is supported by Spanner, e.g., 0.1234567895 rounds
- // to 0.123456790.
- NumericRound LossOfPrecisionHandlingOption = iota
- // NumericError returns an error for numeric values that have a higher
- // precision than what is supported by Spanner. E.g. the client returns an
- // error if the application tries to insert the value 0.1234567895.
- NumericError
-)
-
-// LossOfPrecisionHandling configures how to deal with loss of precision on
-// numeric values. The value of this configuration is global and will be used
-// for all Spanner clients.
-var LossOfPrecisionHandling LossOfPrecisionHandlingOption
-
-// NumericString returns a string representing a *big.Rat in a format compatible
-// with Spanner SQL. It returns a floating-point literal with 9 digits after the
-// decimal point.
-func NumericString(r *big.Rat) string {
- return r.FloatString(NumericScaleDigits)
-}
-
-// validateNumeric returns nil if there are no errors. It will return an error
-// when the numeric number is not valid.
-func validateNumeric(r *big.Rat) error {
- if r == nil {
- return nil
- }
- // Add one more digit to the scale component to find out if there are more
- // digits than required.
- strRep := r.FloatString(NumericScaleDigits + 1)
- strRep = strings.TrimRight(strRep, "0")
- strRep = strings.TrimLeft(strRep, "-")
- s := strings.Split(strRep, ".")
- whole := s[0]
- scale := s[1]
- if len(scale) > NumericScaleDigits {
- return fmt.Errorf("max scale for a numeric is %d. The requested numeric has more", NumericScaleDigits)
- }
- if len(whole) > NumericPrecisionDigits-NumericScaleDigits {
- return fmt.Errorf("max precision for the whole component of a numeric is %d. The requested numeric has a whole component with precision %d", NumericPrecisionDigits-NumericScaleDigits, len(whole))
- }
- return nil
-}
-
-var (
- // CommitTimestamp is a special value used to tell Cloud Spanner to insert
- // the commit timestamp of the transaction into a column. It can be used in
- // a Mutation, or directly used in InsertStruct or InsertMap. See
- // ExampleCommitTimestamp. This is just a placeholder and the actual value
- // stored in this variable has no meaning.
- CommitTimestamp = commitTimestamp
- commitTimestamp = time.Unix(0, 0).In(time.FixedZone("CommitTimestamp placeholder", 0xDB))
-
- jsonNullBytes = []byte("null")
-
- jsonUseNumber bool
-
- protoMsgReflectType = reflect.TypeOf((*proto.Message)(nil)).Elem()
- protoEnumReflectType = reflect.TypeOf((*protoreflect.Enum)(nil)).Elem()
-)
-
-// UseNumberWithJSONDecoderEncoder specifies whether Cloud Spanner JSON numbers are decoded
-// as Number (preserving precision) or float64 (risking loss).
-// Defaults to the same behavior as the standard Go library, which means decoding to float64.
-// Call this method to enable lossless precision.
-// NOTE 1: Calling this method affects the behavior of all clients created by this library, both existing and future instances.
-// NOTE 2: This method sets a global variable that is used by the client to encode/decode JSON numbers. Access to the global variable is not synchronized. You should only call this method when there are no goroutines encoding/decoding Cloud Spanner JSON values. It is recommended to only call this method during the initialization of your application, and preferably before you create any Cloud Spanner clients, and/or in tests when there are no queries being executed.
-func UseNumberWithJSONDecoderEncoder(useNumber bool) {
- jsonUseNumber = useNumber
-}
-
-func jsonUnmarshal(data []byte, v any) error {
- dec := json.NewDecoder(bytes.NewReader(data))
- if jsonUseNumber {
- dec.UseNumber()
- }
- return dec.Decode(v)
-}
-
-// Encoder is the interface implemented by a custom type that can be encoded to
-// a supported type by Spanner. A code example:
-//
-// type customField struct {
-// Prefix string
-// Suffix string
-// }
-//
-// // Convert a customField value to a string
-// func (cf customField) EncodeSpanner() (interface{}, error) {
-// var b bytes.Buffer
-// b.WriteString(cf.Prefix)
-// b.WriteString("-")
-// b.WriteString(cf.Suffix)
-// return b.String(), nil
-// }
-type Encoder interface {
- EncodeSpanner() (interface{}, error)
-}
-
-// Decoder is the interface implemented by a custom type that can be decoded
-// from a supported type by Spanner. A code example:
-//
-// type customField struct {
-// Prefix string
-// Suffix string
-// }
-//
-// // Convert a string to a customField value
-// func (cf *customField) DecodeSpanner(val interface{}) (err error) {
-// strVal, ok := val.(string)
-// if !ok {
-// return fmt.Errorf("failed to decode customField: %v", val)
-// }
-// s := strings.Split(strVal, "-")
-// if len(s) > 1 {
-// cf.Prefix = s[0]
-// cf.Suffix = s[1]
-// }
-// return nil
-// }
-type Decoder interface {
- DecodeSpanner(input interface{}) error
-}
-
-// NullableValue is the interface implemented by all null value wrapper types.
-type NullableValue interface {
- // IsNull returns true if the underlying database value is null.
- IsNull() bool
-}
-
-// NullInt64 represents a Cloud Spanner INT64 that may be NULL.
-type NullInt64 struct {
- Int64 int64 // Int64 contains the value when it is non-NULL, and zero when NULL.
- Valid bool // Valid is true if Int64 is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullInt64.
-func (n NullInt64) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullInt64
-func (n NullInt64) String() string {
- if !n.Valid {
- return nullString
- }
- return fmt.Sprintf("%v", n.Int64)
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullInt64.
-func (n NullInt64) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Int64)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullInt64.
-func (n *NullInt64) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Int64 = int64(0)
- n.Valid = false
- return nil
- }
- num, err := strconv.ParseInt(string(payload), 10, 64)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to int64: got %v", string(payload))
- }
- n.Int64 = num
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullInt64) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Int64, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullInt64) Scan(value interface{}) error {
- if value == nil {
- n.Int64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullInt64: %v", p)
- case *int64:
- n.Int64 = *p
- case int64:
- n.Int64 = p
- case *NullInt64:
- n.Int64 = p.Int64
- n.Valid = p.Valid
- case NullInt64:
- n.Int64 = p.Int64
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullInt64) GormDataType() string {
- return "INT64"
-}
-
-// NullString represents a Cloud Spanner STRING that may be NULL.
-type NullString struct {
- StringVal string // StringVal contains the value when it is non-NULL, and an empty string when NULL.
- Valid bool // Valid is true if StringVal is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullString.
-func (n NullString) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullString
-func (n NullString) String() string {
- if !n.Valid {
- return nullString
- }
- return n.StringVal
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullString.
-func (n NullString) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.StringVal)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullString.
-func (n *NullString) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.StringVal = ""
- n.Valid = false
- return nil
- }
- var s *string
- if err := jsonUnmarshal(payload, &s); err != nil {
- return err
- }
- if s != nil {
- n.StringVal = *s
- n.Valid = true
- } else {
- n.StringVal = ""
- n.Valid = false
- }
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullString) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.StringVal, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullString) Scan(value interface{}) error {
- if value == nil {
- n.StringVal, n.Valid = "", false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullString: %v", p)
- case *string:
- n.StringVal = *p
- case string:
- n.StringVal = p
- case *NullString:
- n.StringVal = p.StringVal
- n.Valid = p.Valid
- case NullString:
- n.StringVal = p.StringVal
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullString) GormDataType() string {
- return "STRING(MAX)"
-}
-
-// NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL.
-type NullFloat64 struct {
- Float64 float64 // Float64 contains the value when it is non-NULL, and zero when NULL.
- Valid bool // Valid is true if Float64 is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullFloat64.
-func (n NullFloat64) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullFloat64
-func (n NullFloat64) String() string {
- if !n.Valid {
- return nullString
- }
- return fmt.Sprintf("%v", n.Float64)
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullFloat64.
-func (n NullFloat64) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Float64)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullFloat64.
-func (n *NullFloat64) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Float64 = float64(0)
- n.Valid = false
- return nil
- }
- num, err := strconv.ParseFloat(string(payload), 64)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to float64: got %v", string(payload))
- }
- n.Float64 = num
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullFloat64) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Float64, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullFloat64) Scan(value interface{}) error {
- if value == nil {
- n.Float64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullFloat64: %v", p)
- case *float64:
- n.Float64 = *p
- case float64:
- n.Float64 = p
- case *NullFloat64:
- n.Float64 = p.Float64
- n.Valid = p.Valid
- case NullFloat64:
- n.Float64 = p.Float64
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullFloat64) GormDataType() string {
- return "FLOAT64"
-}
-
-// NullFloat32 represents a Cloud Spanner FLOAT32 that may be NULL.
-type NullFloat32 struct {
- Float32 float32 // Float32 contains the value when it is non-NULL, and zero when NULL.
- Valid bool // Valid is true if FLOAT32 is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullFloat32.
-func (n NullFloat32) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullFloat32
-func (n NullFloat32) String() string {
- if !n.Valid {
- return nullString
- }
- return fmt.Sprintf("%v", n.Float32)
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullFloat32.
-func (n NullFloat32) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Float32)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullFloat32.
-func (n *NullFloat32) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Float32 = float32(0)
- n.Valid = false
- return nil
- }
- num, err := strconv.ParseFloat(string(payload), 32)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to float32: got %v", string(payload))
- }
- n.Float32 = float32(num)
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullFloat32) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Float32, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullFloat32) Scan(value interface{}) error {
- if value == nil {
- n.Float32, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullFloat32: %v", p)
- case *float32:
- n.Float32 = *p
- case float32:
- n.Float32 = p
- case *NullFloat32:
- n.Float32 = p.Float32
- n.Valid = p.Valid
- case NullFloat32:
- n.Float32 = p.Float32
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullFloat32) GormDataType() string {
- return "FLOAT32"
-}
-
-// NullBool represents a Cloud Spanner BOOL that may be NULL.
-type NullBool struct {
- Bool bool // Bool contains the value when it is non-NULL, and false when NULL.
- Valid bool // Valid is true if Bool is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullBool.
-func (n NullBool) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullBool
-func (n NullBool) String() string {
- if !n.Valid {
- return nullString
- }
- return fmt.Sprintf("%v", n.Bool)
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullBool.
-func (n NullBool) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Bool)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullBool.
-func (n *NullBool) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Bool = false
- n.Valid = false
- return nil
- }
- b, err := strconv.ParseBool(string(payload))
- if err != nil {
- return fmt.Errorf("payload cannot be converted to bool: got %v", string(payload))
- }
- n.Bool = b
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullBool) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Bool, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullBool) Scan(value interface{}) error {
- if value == nil {
- n.Bool, n.Valid = false, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullBool: %v", p)
- case *bool:
- n.Bool = *p
- case bool:
- n.Bool = p
- case *NullBool:
- n.Bool = p.Bool
- n.Valid = p.Valid
- case NullBool:
- n.Bool = p.Bool
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullBool) GormDataType() string {
- return "BOOL"
-}
-
-// NullTime represents a Cloud Spanner TIMESTAMP that may be null.
-type NullTime struct {
- Time time.Time // Time contains the value when it is non-NULL, and a zero time.Time when NULL.
- Valid bool // Valid is true if Time is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullTime.
-func (n NullTime) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullTime
-func (n NullTime) String() string {
- if !n.Valid {
- return nullString
- }
- return n.Time.Format(time.RFC3339Nano)
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullTime.
-func (n NullTime) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Time)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullTime.
-func (n *NullTime) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Time = time.Time{}
- n.Valid = false
- return nil
- }
- payload, err := trimDoubleQuotes(payload)
- if err != nil {
- return err
- }
- s := string(payload)
- t, err := time.Parse(time.RFC3339Nano, s)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to time.Time: got %v", string(payload))
- }
- n.Time = t
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullTime) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Time, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullTime) Scan(value interface{}) error {
- if value == nil {
- n.Time, n.Valid = time.Time{}, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullTime: %v", p)
- case *time.Time:
- n.Time = *p
- case time.Time:
- n.Time = p
- case *NullTime:
- n.Time = p.Time
- n.Valid = p.Valid
- case NullTime:
- n.Time = p.Time
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullTime) GormDataType() string {
- return "TIMESTAMP"
-}
-
-// NullDate represents a Cloud Spanner DATE that may be null.
-type NullDate struct {
- Date civil.Date // Date contains the value when it is non-NULL, and a zero civil.Date when NULL.
- Valid bool // Valid is true if Date is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullDate.
-func (n NullDate) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullDate
-func (n NullDate) String() string {
- if !n.Valid {
- return nullString
- }
- return n.Date.String()
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullDate.
-func (n NullDate) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Date)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullDate.
-func (n *NullDate) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Date = civil.Date{}
- n.Valid = false
- return nil
- }
- payload, err := trimDoubleQuotes(payload)
- if err != nil {
- return err
- }
- s := string(payload)
- t, err := civil.ParseDate(s)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to civil.Date: got %v", string(payload))
- }
- n.Date = t
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullDate) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Date, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullDate) Scan(value interface{}) error {
- if value == nil {
- n.Date, n.Valid = civil.Date{}, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullDate: %v", p)
- case *civil.Date:
- n.Date = *p
- case civil.Date:
- n.Date = p
- case *NullDate:
- n.Date = p.Date
- n.Valid = p.Valid
- case NullDate:
- n.Date = p.Date
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullDate) GormDataType() string {
- return "DATE"
-}
-
-// NullNumeric represents a Cloud Spanner Numeric that may be NULL.
-type NullNumeric struct {
- Numeric big.Rat // Numeric contains the value when it is non-NULL, and a zero big.Rat when NULL.
- Valid bool // Valid is true if Numeric is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullNumeric.
-func (n NullNumeric) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullNumeric
-func (n NullNumeric) String() string {
- if !n.Valid {
- return nullString
- }
- return fmt.Sprintf("%v", NumericString(&n.Numeric))
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullNumeric.
-func (n NullNumeric) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, NumericString(&n.Numeric))
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullNumeric.
-func (n *NullNumeric) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Numeric = big.Rat{}
- n.Valid = false
- return nil
- }
- payload, err := trimDoubleQuotes(payload)
- if err != nil {
- return err
- }
- s := string(payload)
- val, ok := (&big.Rat{}).SetString(s)
- if !ok {
- return fmt.Errorf("payload cannot be converted to big.Rat: got %v", string(payload))
- }
- n.Numeric = *val
- n.Valid = true
- return nil
-}
-
-// Value implements the driver.Valuer interface.
-func (n NullNumeric) Value() (driver.Value, error) {
- if n.IsNull() {
- return nil, nil
- }
- return n.Numeric, nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (n *NullNumeric) Scan(value interface{}) error {
- if value == nil {
- n.Numeric, n.Valid = big.Rat{}, false
- return nil
- }
- n.Valid = true
- switch p := value.(type) {
- default:
- return spannerErrorf(codes.InvalidArgument, "invalid type for NullNumeric: %v", p)
- case *big.Rat:
- n.Numeric = *p
- case big.Rat:
- n.Numeric = p
- case *NullNumeric:
- n.Numeric = p.Numeric
- n.Valid = p.Valid
- case NullNumeric:
- n.Numeric = p.Numeric
- n.Valid = p.Valid
- }
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullNumeric) GormDataType() string {
- return "NUMERIC"
-}
-
-// NullJSON represents a Cloud Spanner JSON that may be NULL.
-//
-// This type must always be used when encoding values to a JSON column in Cloud
-// Spanner.
-//
-// NullJSON does not implement the driver.Valuer and sql.Scanner interfaces, as
-// the underlying value can be anything. This means that the type NullJSON must
-// also be used when calling sql.Row#Scan(dest ...interface{}) for a JSON
-// column.
-type NullJSON struct {
- Value interface{} // Val contains the value when it is non-NULL, and nil when NULL.
- Valid bool // Valid is true if Json is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullJSON.
-func (n NullJSON) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullJSON.
-func (n NullJSON) String() string {
- if !n.Valid {
- return nullString
- }
- b, err := json.Marshal(n.Value)
- if err != nil {
- return fmt.Sprintf("error: %v", err)
- }
- return fmt.Sprintf("%v", string(b))
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullJSON.
-func (n NullJSON) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Value)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullJSON.
-func (n *NullJSON) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Valid = false
- return nil
- }
- var v interface{}
- err := jsonUnmarshal(payload, &v)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to a struct: got %v, err: %w", string(payload), err)
- }
- n.Value = v
- n.Valid = true
- return nil
-}
-
-// GormDataType is used by gorm to determine the default data type for fields with this type.
-func (n NullJSON) GormDataType() string {
- return "JSON"
-}
-
-// PGNumeric represents a Cloud Spanner PG Numeric that may be NULL.
-type PGNumeric struct {
- Numeric string // Numeric contains the value when it is non-NULL, and an empty string when NULL.
- Valid bool // Valid is true if Numeric is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for PGNumeric.
-func (n PGNumeric) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for PGNumeric
-func (n PGNumeric) String() string {
- if !n.Valid {
- return nullString
- }
- return n.Numeric
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for PGNumeric.
-func (n PGNumeric) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Numeric)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for PGNumeric.
-func (n *PGNumeric) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Numeric = ""
- n.Valid = false
- return nil
- }
- payload, err := trimDoubleQuotes(payload)
- if err != nil {
- return err
- }
- n.Numeric = string(payload)
- n.Valid = true
- return nil
-}
-
-// NullProtoMessage represents a Cloud Spanner PROTO that may be NULL.
-// To write a NULL value using NullProtoMessage set ProtoMessageVal to typed nil and set Valid to true.
-type NullProtoMessage struct {
- ProtoMessageVal proto.Message // ProtoMessageVal contains the value when Valid is true, and nil when NULL.
- Valid bool // Valid is true if ProtoMessageVal is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullProtoMessage.
-func (n NullProtoMessage) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullProtoMessage.
-func (n NullProtoMessage) String() string {
- if !n.Valid {
- return nullString
- }
- return protoadapt.MessageV1Of(n.ProtoMessageVal).String()
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullProtoMessage.
-func (n NullProtoMessage) MarshalJSON() ([]byte, error) {
- if n.Valid {
- return json.Marshal(n.ProtoMessageVal)
- }
- return jsonNullBytes, nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullProtoMessage.
-func (n *NullProtoMessage) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.ProtoMessageVal = nil
- n.Valid = false
- return nil
- }
- err := jsonUnmarshal(payload, n.ProtoMessageVal)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to a proto message: err: %s", err)
- }
- n.Valid = true
- return nil
-}
-
-// NullProtoEnum represents a Cloud Spanner ENUM that may be NULL.
-// To write a NULL value using NullProtoEnum set ProtoEnumVal to typed nil and set Valid to true.
-type NullProtoEnum struct {
- ProtoEnumVal protoreflect.Enum // ProtoEnumVal contains the value when Valid is true, and nil when NULL.
- Valid bool // Valid is true if ProtoEnumVal is not NULL.
-}
-
-// IsNull implements NullableValue.IsNull for NullProtoEnum.
-func (n NullProtoEnum) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for NullProtoEnum.
-func (n NullProtoEnum) String() string {
- if !n.Valid {
- return nullString
- }
- return fmt.Sprintf("%v", n.ProtoEnumVal)
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for NullProtoEnum.
-func (n NullProtoEnum) MarshalJSON() ([]byte, error) {
- if n.Valid && n.ProtoEnumVal != nil {
- return []byte(fmt.Sprintf("%v", n.ProtoEnumVal.Number())), nil
- }
- return jsonNullBytes, nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for NullProtoEnum.
-func (n *NullProtoEnum) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.ProtoEnumVal = nil
- n.Valid = false
- return nil
- }
- if reflect.ValueOf(n.ProtoEnumVal).Kind() != reflect.Ptr {
- return errNotAPointerField(n, n.ProtoEnumVal)
- }
- num, err := strconv.ParseInt(string(payload), 10, 64)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to Enum: got %v", string(payload))
- }
- reflect.ValueOf(n.ProtoEnumVal).Elem().SetInt(num)
- n.Valid = true
- return nil
-}
-
-// NullRow represents a Cloud Spanner STRUCT that may be NULL.
-// See also the document for Row.
-// Note that NullRow is not a valid Cloud Spanner column Type.
-type NullRow struct {
- Row Row // Row contains the value when it is non-NULL, and a zero Row when NULL.
- Valid bool // Valid is true if Row is not NULL.
-}
-
-// PGJsonB represents a Cloud Spanner PGJsonB that may be NULL.
-type PGJsonB struct {
- Value interface{} // Val contains the value when it is non-NULL, and nil when NULL.
- Valid bool // Valid is true if PGJsonB is not NULL.
- // This is here to support customer wrappers around PGJsonB type, this will help during getDecodableSpannerType
- // to differentiate between PGJsonB and NullJSON types.
- _ bool
-}
-
-// IsNull implements NullableValue.IsNull for PGJsonB.
-func (n PGJsonB) IsNull() bool {
- return !n.Valid
-}
-
-// String implements Stringer.String for PGJsonB.
-func (n PGJsonB) String() string {
- if !n.Valid {
- return nullString
- }
- b, err := json.Marshal(n.Value)
- if err != nil {
- return fmt.Sprintf("error: %v", err)
- }
- return fmt.Sprintf("%v", string(b))
-}
-
-// MarshalJSON implements json.Marshaler.MarshalJSON for PGJsonB.
-func (n PGJsonB) MarshalJSON() ([]byte, error) {
- return nulljson(n.Valid, n.Value)
-}
-
-// UnmarshalJSON implements json.Unmarshaler.UnmarshalJSON for PGJsonB.
-func (n *PGJsonB) UnmarshalJSON(payload []byte) error {
- if payload == nil {
- return fmt.Errorf("payload should not be nil")
- }
- if bytes.Equal(payload, jsonNullBytes) {
- n.Valid = false
- return nil
- }
- var v interface{}
- err := jsonUnmarshal(payload, &v)
- if err != nil {
- return fmt.Errorf("payload cannot be converted to a struct: got %v, err: %w", string(payload), err)
- }
- n.Value = v
- n.Valid = true
- return nil
-}
-
-func nulljson(valid bool, v interface{}) ([]byte, error) {
- if !valid {
- return jsonNullBytes, nil
- }
- return json.Marshal(v)
-}
-
-// GenericColumnValue represents the generic encoded value and type of the
-// column. See google.spanner.v1.ResultSet proto for details. This can be
-// useful for proxying query results when the result types are not known in
-// advance.
-//
-// If you populate a GenericColumnValue from a row using Row.Column or related
-// methods, do not modify the contents of Type and Value.
-type GenericColumnValue struct {
- Type *sppb.Type
- Value *proto3.Value
-}
-
-// Decode decodes a GenericColumnValue. The ptr argument should be a pointer
-// to a Go value that can accept v.
-func (v GenericColumnValue) Decode(ptr interface{}) error {
- return decodeValue(v.Value, v.Type, ptr)
-}
-
-// NewGenericColumnValue creates a GenericColumnValue from Go value that is
-// valid for Cloud Spanner.
-func newGenericColumnValue(v interface{}) (*GenericColumnValue, error) {
- value, typ, err := encodeValue(v)
- if err != nil {
- return nil, err
- }
- return &GenericColumnValue{Value: value, Type: typ}, nil
-}
-
-// errTypeMismatch returns error for destination not having a compatible type
-// with source Cloud Spanner type.
-func errTypeMismatch(srcCode, elCode sppb.TypeCode, dst interface{}) error {
- s := srcCode.String()
- if srcCode == sppb.TypeCode_ARRAY {
- s = fmt.Sprintf("%v[%v]", srcCode, elCode)
- }
- return spannerErrorf(codes.InvalidArgument, "type %T cannot be used for decoding %s", dst, s)
-}
-
-// errNilSpannerType returns error for nil Cloud Spanner type in decoding.
-func errNilSpannerType() error {
- return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner data type in decoding")
-}
-
-// errNilSrc returns error for decoding from nil proto value.
-func errNilSrc() error {
- return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner value in decoding")
-}
-
-// errNilDst returns error for decoding into nil interface{}.
-func errNilDst(dst interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "cannot decode into nil type %T", dst)
-}
-
-// errNilDstField returns error for decoding into nil interface{} of Value field in NullProtoMessage or NullProtoEnum.
-func errNilDstField(dst interface{}, field string) error {
- return spannerErrorf(codes.InvalidArgument, "field %s in %T cannot be nil", field, dst)
-}
-
-// errNilArrElemType returns error for input Cloud Spanner data type being a array but without a
-// non-nil array element type.
-func errNilArrElemType(t *sppb.Type) error {
- return spannerErrorf(codes.FailedPrecondition, "array type %v is with nil array element type", t)
-}
-
-// errNotValidSrc returns error if Valid field is false for NullProtoMessage and NullProtoEnum
-func errNotValidSrc(dst interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "field \"Valid\" of %T cannot be set to false when writing data to Cloud Spanner. Use typed nil in %T to write null values to Cloud Spanner", dst, dst)
-}
-
-func errUnsupportedEmbeddedStructFields(fname string) error {
- return spannerErrorf(codes.InvalidArgument, "Embedded field: %s. Embedded and anonymous fields are not allowed "+
- "when converting Go structs to Cloud Spanner STRUCT values. To create a STRUCT value with an "+
- "unnamed field, use a `spanner:\"\"` field tag.", fname)
-}
-
-// errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't
-// support NULL values.
-func errDstNotForNull(dst interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "destination %T cannot support NULL SQL values", dst)
-}
-
-// errBadEncoding returns error for decoding wrongly encoded types.
-func errBadEncoding(v *proto3.Value, err error) error {
- return spannerErrorf(codes.FailedPrecondition, "%v wasn't correctly encoded: <%v>", v, err)
-}
-
-// errNotAPointer returns error for decoding a non pointer type.
-func errNotAPointer(dst interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "destination %T must be a pointer", dst)
-}
-
-// errNotAPointerField returns error for decoding a non pointer type.
-func errNotAPointerField(dst interface{}, dstField interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "destination %T in %T must be a pointer", dstField, dst)
-}
-
-func errNilNotAllowed(dst interface{}, name string) error {
- return spannerErrorf(codes.InvalidArgument, "destination %T does not support Null values. Use %s, an array with pointer type elements to read Null values", dst, name)
-}
-
-func parseNullTime(v *proto3.Value, p *NullTime, code sppb.TypeCode, isNull bool) error {
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_TIMESTAMP {
- return errTypeMismatch(code, sppb.TypeCode_TYPE_CODE_UNSPECIFIED, p)
- }
- if isNull {
- *p = NullTime{}
- return nil
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := time.Parse(time.RFC3339Nano, x)
- if err != nil {
- return errBadEncoding(v, err)
- }
- p.Valid = true
- p.Time = y
- return nil
-}
-
-// decodeValue decodes a protobuf Value into a pointer to a Go value, as
-// specified by sppb.Type.
-func decodeValue(v *proto3.Value, t *sppb.Type, ptr interface{}, opts ...DecodeOptions) error {
- if v == nil {
- return errNilSrc()
- }
- if t == nil {
- return errNilSpannerType()
- }
- code := t.Code
- typeAnnotation := t.TypeAnnotation
- acode := sppb.TypeCode_TYPE_CODE_UNSPECIFIED
- atypeAnnotation := sppb.TypeAnnotationCode_TYPE_ANNOTATION_CODE_UNSPECIFIED
- if code == sppb.TypeCode_ARRAY {
- if t.ArrayElementType == nil {
- return errNilArrElemType(t)
- }
- acode = t.ArrayElementType.Code
- atypeAnnotation = t.ArrayElementType.TypeAnnotation
- }
- _, isNull := v.Kind.(*proto3.Value_NullValue)
-
- // Do the decoding based on the type of ptr.
- switch p := ptr.(type) {
- case nil:
- return errNilDst(nil)
- case *string:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_STRING {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- *p = x
- case *NullString, **string, *sql.NullString:
- // Most Null* types are automatically supported for both spanner.Null* and sql.Null* types, except for
- // NullString, and we need to add explicit support for it here. The reason that the other types are
- // automatically supported is that they use the same field names (e.g. spanner.NullBool and sql.NullBool both
- // contain the fields Valid and Bool). spanner.NullString has a field StringVal, sql.NullString has a field
- // String.
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_STRING {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *NullString:
- *sp = NullString{}
- case **string:
- *sp = nil
- case *sql.NullString:
- *sp = sql.NullString{}
- }
- break
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *NullString:
- sp.Valid = true
- sp.StringVal = x
- case **string:
- *sp = &x
- case *sql.NullString:
- sp.Valid = true
- sp.String = x
- }
- case *[]NullString, *[]*string:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_STRING {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullString:
- *sp = nil
- case *[]*string:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullString:
- y, err := decodeNullStringArray(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*string:
- y, err := decodeStringPointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]string:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_STRING {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeStringArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *[]byte:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_BYTES && code != sppb.TypeCode_PROTO {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := base64.StdEncoding.DecodeString(x)
- if err != nil {
- return errBadEncoding(v, err)
- }
- *p = y
- case *[][]byte:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_BYTES && acode != sppb.TypeCode_PROTO {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeByteArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *int64:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_INT64 && code != sppb.TypeCode_ENUM {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := strconv.ParseInt(x, 10, 64)
- if err != nil {
- return errBadEncoding(v, err)
- }
- *p = y
- case *NullInt64, **int64:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_INT64 && code != sppb.TypeCode_ENUM {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *NullInt64:
- *sp = NullInt64{}
- case **int64:
- *sp = nil
- }
- break
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := strconv.ParseInt(x, 10, 64)
- if err != nil {
- return errBadEncoding(v, err)
- }
- switch sp := ptr.(type) {
- case *NullInt64:
- sp.Valid = true
- sp.Int64 = y
- case **int64:
- *sp = &y
- }
- case *[]NullInt64, *[]*int64:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_INT64 && acode != sppb.TypeCode_ENUM {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullInt64:
- *sp = nil
- case *[]*int64:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullInt64:
- y, err := decodeNullInt64Array(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*int64:
- y, err := decodeInt64PointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]int64:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_INT64 && acode != sppb.TypeCode_ENUM {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeInt64Array(x)
- if err != nil {
- return err
- }
- *p = y
- case *bool:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_BOOL {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x, err := getBoolValue(v)
- if err != nil {
- return err
- }
- *p = x
- case *NullBool, **bool:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_BOOL {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *NullBool:
- *sp = NullBool{}
- case **bool:
- *sp = nil
- }
- break
- }
- x, err := getBoolValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *NullBool:
- sp.Valid = true
- sp.Bool = x
- case **bool:
- *sp = &x
- }
- case *[]NullBool, *[]*bool:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_BOOL {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullBool:
- *sp = nil
- case *[]*bool:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullBool:
- y, err := decodeNullBoolArray(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*bool:
- y, err := decodeBoolPointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]bool:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_BOOL {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeBoolArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *float64:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_FLOAT64 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x, err := getFloat64Value(v)
- if err != nil {
- return err
- }
- *p = x
- case *NullFloat64, **float64:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_FLOAT64 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *NullFloat64:
- *sp = NullFloat64{}
- case **float64:
- *sp = nil
- }
- break
- }
- x, err := getFloat64Value(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *NullFloat64:
- sp.Valid = true
- sp.Float64 = x
- case **float64:
- *sp = &x
- }
- case *[]NullFloat64, *[]*float64:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_FLOAT64 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullFloat64:
- *sp = nil
- case *[]*float64:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullFloat64:
- y, err := decodeNullFloat64Array(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*float64:
- y, err := decodeFloat64PointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]float64:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_FLOAT64 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeFloat64Array(x)
- if err != nil {
- return err
- }
- *p = y
- case *float32:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_FLOAT32 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x, err := getFloat32Value(v)
- if err != nil {
- return err
- }
- *p = x
- case *NullFloat32, **float32:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_FLOAT32 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *NullFloat32:
- *sp = NullFloat32{}
- case **float32:
- *sp = nil
- }
- break
- }
- x, err := getFloat32Value(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *NullFloat32:
- sp.Valid = true
- sp.Float32 = x
- case **float32:
- *sp = &x
- }
- case *[]NullFloat32, *[]*float32:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_FLOAT32 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullFloat32:
- *sp = nil
- case *[]*float32:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullFloat32:
- y, err := decodeNullFloat32Array(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*float32:
- y, err := decodeFloat32PointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]float32:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_FLOAT32 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeFloat32Array(x)
- if err != nil {
- return err
- }
- *p = y
- case *big.Rat:
- if code != sppb.TypeCode_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x := v.GetStringValue()
- y, ok := (&big.Rat{}).SetString(x)
- if !ok {
- return errUnexpectedNumericStr(x)
- }
- *p = *y
- case *NullJSON:
- if p == nil {
- return errNilDst(p)
- }
- if code == sppb.TypeCode_ARRAY {
- if acode != sppb.TypeCode_JSON {
- return errTypeMismatch(code, acode, ptr)
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeNullJSONArrayToNullJSON(x)
- if err != nil {
- return err
- }
- *p = *y
- } else {
- if code != sppb.TypeCode_JSON {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = NullJSON{}
- break
- }
- x := v.GetStringValue()
- var y interface{}
- err := jsonUnmarshal([]byte(x), &y)
- if err != nil {
- return err
- }
- *p = NullJSON{y, true}
- }
- case *[]NullJSON:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_JSON {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeNullJSONArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *NullNumeric:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = NullNumeric{}
- break
- }
- x := v.GetStringValue()
- y, ok := (&big.Rat{}).SetString(x)
- if !ok {
- return errUnexpectedNumericStr(x)
- }
- *p = NullNumeric{*y, true}
- case **big.Rat:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x := v.GetStringValue()
- y, ok := (&big.Rat{}).SetString(x)
- if !ok {
- return errUnexpectedNumericStr(x)
- }
- *p = y
- case *[]NullNumeric, *[]*big.Rat:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullNumeric:
- *sp = nil
- case *[]*big.Rat:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullNumeric:
- y, err := decodeNullNumericArray(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*big.Rat:
- y, err := decodeNumericPointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]big.Rat:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeNumericArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *PGNumeric:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_NUMERIC || typeAnnotation != sppb.TypeAnnotationCode_PG_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = PGNumeric{}
- break
- }
- *p = PGNumeric{v.GetStringValue(), true}
- case *[]PGNumeric:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_NUMERIC || atypeAnnotation != sppb.TypeAnnotationCode_PG_NUMERIC {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodePGNumericArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *PGJsonB:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_JSON || typeAnnotation != sppb.TypeAnnotationCode_PG_JSONB {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = PGJsonB{}
- break
- }
- x := v.GetStringValue()
- var y interface{}
- err := jsonUnmarshal([]byte(x), &y)
- if err != nil {
- return err
- }
- *p = PGJsonB{Value: y, Valid: true}
- case *[]PGJsonB:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_JSON || typeAnnotation != sppb.TypeAnnotationCode_PG_JSONB {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodePGJsonBArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *time.Time:
- var nt NullTime
- if isNull {
- return errDstNotForNull(ptr)
- }
- err := parseNullTime(v, &nt, code, isNull)
- if err != nil {
- return err
- }
- *p = nt.Time
- case *NullTime:
- err := parseNullTime(v, p, code, isNull)
- if err != nil {
- return err
- }
- case **time.Time:
- var nt NullTime
- if isNull {
- *p = nil
- break
- }
- err := parseNullTime(v, &nt, code, isNull)
- if err != nil {
- return err
- }
- *p = &nt.Time
- case *[]NullTime, *[]*time.Time:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_TIMESTAMP {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullTime:
- *sp = nil
- case *[]*time.Time:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullTime:
- y, err := decodeNullTimeArray(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*time.Time:
- y, err := decodeTimePointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]time.Time:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_TIMESTAMP {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeTimeArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *civil.Date:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_DATE {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := civil.ParseDate(x)
- if err != nil {
- return errBadEncoding(v, err)
- }
- *p = y
- case *NullDate, **civil.Date:
- if p == nil {
- return errNilDst(p)
- }
- if code != sppb.TypeCode_DATE {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *NullDate:
- *sp = NullDate{}
- case **civil.Date:
- *sp = nil
- }
- break
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := civil.ParseDate(x)
- if err != nil {
- return errBadEncoding(v, err)
- }
- switch sp := ptr.(type) {
- case *NullDate:
- sp.Valid = true
- sp.Date = y
- case **civil.Date:
- *sp = &y
- }
- case *[]NullDate, *[]*civil.Date:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_DATE {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- switch sp := ptr.(type) {
- case *[]NullDate:
- *sp = nil
- case *[]*civil.Date:
- *sp = nil
- }
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- switch sp := ptr.(type) {
- case *[]NullDate:
- y, err := decodeNullDateArray(x)
- if err != nil {
- return err
- }
- *sp = y
- case *[]*civil.Date:
- y, err := decodeDatePointerArray(x)
- if err != nil {
- return err
- }
- *sp = y
- }
- case *[]civil.Date:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_DATE {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeDateArray(x)
- if err != nil {
- return err
- }
- *p = y
- case *[]NullRow:
- if p == nil {
- return errNilDst(p)
- }
- if acode != sppb.TypeCode_STRUCT {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = nil
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- y, err := decodeRowArray(t.ArrayElementType.StructType, x)
- if err != nil {
- return err
- }
- *p = y
- case *GenericColumnValue:
- *p = GenericColumnValue{Type: t, Value: v}
- case protoreflect.Enum:
- if p == nil {
- return errNilDst(p)
- }
- if reflect.ValueOf(p).Kind() != reflect.Ptr {
- return errNotAPointer(p)
- }
- if code != sppb.TypeCode_ENUM && code != sppb.TypeCode_INT64 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- y, err := getIntegerFromStringValue(v)
- if err != nil {
- return err
- }
- reflect.ValueOf(p).Elem().SetInt(y)
- case *NullProtoEnum:
- if p == nil {
- return errNilDst(p)
- }
- if p.ProtoEnumVal == nil {
- return errNilDstField(p, "ProtoEnumVal")
- }
- if reflect.ValueOf(p.ProtoEnumVal).Kind() != reflect.Ptr {
- return errNotAPointer(p)
- }
- if code != sppb.TypeCode_ENUM && code != sppb.TypeCode_INT64 {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = NullProtoEnum{}
- break
- }
- y, err := getIntegerFromStringValue(v)
- if err != nil {
- return err
- }
- reflect.ValueOf(p.ProtoEnumVal).Elem().SetInt(y)
- p.Valid = true
- case proto.Message:
- if p == nil {
- return errNilDst(p)
- }
- if reflect.ValueOf(p).Kind() != reflect.Ptr {
- return errNotAPointer(p)
- }
- if code != sppb.TypeCode_PROTO && code != sppb.TypeCode_BYTES {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- return errDstNotForNull(ptr)
- }
- y, err := getBytesFromStringValue(v)
- if err != nil {
- return err
- }
- err = proto.Unmarshal(y, p)
- if err != nil {
- return err
- }
- case *NullProtoMessage:
- if p == nil {
- return errNilDst(p)
- }
- if p.ProtoMessageVal == nil {
- return errNilDstField(p, "ProtoMessageVal")
- }
- if reflect.ValueOf(p.ProtoMessageVal).Kind() != reflect.Ptr {
- return errNotAPointer(p.ProtoMessageVal)
- }
- if code != sppb.TypeCode_PROTO && code != sppb.TypeCode_BYTES {
- return errTypeMismatch(code, acode, ptr)
- }
- if isNull {
- *p = NullProtoMessage{}
- break
- }
- y, err := getBytesFromStringValue(v)
- if err != nil {
- return err
- }
- err = proto.Unmarshal(y, p.ProtoMessageVal)
- if err != nil {
- return err
- }
- p.Valid = true
- default:
- // Check if the pointer is a custom type that implements spanner.Decoder
- // interface.
- if decodedVal, ok := ptr.(Decoder); ok {
- x, err := getGenericValue(t, v)
- if err != nil {
- return err
- }
- return decodedVal.DecodeSpanner(x)
- }
-
- // Check if the pointer is a variant of a base type.
- decodableType := getDecodableSpannerType(ptr, true)
- if decodableType != spannerTypeUnknown {
- if isNull && !decodableType.supportsNull() {
- return errDstNotForNull(ptr)
- }
- return decodableType.decodeValueToCustomType(v, t, acode, atypeAnnotation, ptr)
- }
-
- rv := reflect.ValueOf(ptr)
- typ := rv.Type()
- // Check if the interface{} is a pointer and is of type array of proto columns
- if typ.Kind() == reflect.Ptr && isAnArrayOfProtoColumn(ptr) && code == sppb.TypeCode_ARRAY {
- if isNull {
- rv.Elem().Set(reflect.Zero(rv.Elem().Type()))
- break
- }
- // Get the user-defined type of the proto array
- etyp := typ.Elem().Elem()
- switch acode {
- case sppb.TypeCode_PROTO, sppb.TypeCode_BYTES:
- if etyp.Implements(protoMsgReflectType) {
- if etyp.Kind() == reflect.Ptr {
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- return decodeProtoMessagePtrArray(x, t.ArrayElementType, rv)
- }
- return errTypeMismatch(code, acode, ptr)
- }
- case sppb.TypeCode_ENUM, sppb.TypeCode_INT64:
- if etyp.Implements(protoEnumReflectType) {
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- if etyp.Kind() == reflect.Ptr {
- return decodeProtoEnumPtrArray(x, t.ArrayElementType, rv)
- }
- return decodeProtoEnumArray(x, t.ArrayElementType, rv, ptr)
- }
- }
- }
-
- // Check if the proto encoding is for an array of structs.
- if !(code == sppb.TypeCode_ARRAY && acode == sppb.TypeCode_STRUCT) {
- return errTypeMismatch(code, acode, ptr)
- }
- vp := reflect.ValueOf(p)
- if !vp.IsValid() {
- return errNilDst(p)
- }
- if !isPtrStructPtrSlice(vp.Type()) {
- // The container is not a slice of struct pointers.
- return fmt.Errorf("the container is not a slice of struct pointers: %v", errTypeMismatch(code, acode, ptr))
- }
- // Only use reflection for nil detection on slow path.
- // Also, IsNil panics on many types, so check it after the type check.
- if vp.IsNil() {
- return errNilDst(p)
- }
- if isNull {
- // The proto Value is encoding NULL, set the pointer to struct
- // slice to nil as well.
- vp.Elem().Set(reflect.Zero(vp.Elem().Type()))
- break
- }
- x, err := getListValue(v)
- if err != nil {
- return err
- }
- s := decodeSetting{
- Lenient: false,
- }
- for _, opt := range opts {
- opt.Apply(&s)
- }
- if err = decodeStructArray(t.ArrayElementType.StructType, x, p, s.Lenient); err != nil {
- return err
- }
- }
- return nil
-}
-
// decodableSpannerType represents the Go types that a value from a Spanner
// database can be converted to.
type decodableSpannerType uint

// Enumeration of all destination categories handled by
// decodeValueToCustomType. spannerTypeUnknown means the destination is not
// convertible to any known base type; spannerTypeInvalid means the
// destination was an invalid (e.g. nil) reflect value.
const (
	spannerTypeUnknown decodableSpannerType = iota
	spannerTypeInvalid
	spannerTypeNonNullString
	spannerTypeByteArray
	spannerTypeNonNullInt64
	spannerTypeNonNullBool
	spannerTypeNonNullFloat64
	spannerTypeNonNullFloat32
	spannerTypeNonNullNumeric
	spannerTypeNonNullTime
	spannerTypeNonNullDate
	spannerTypeNullString
	spannerTypeNullInt64
	spannerTypeNullBool
	spannerTypeNullFloat64
	spannerTypeNullFloat32
	spannerTypeNullTime
	spannerTypeNullDate
	spannerTypeNullNumeric
	spannerTypeNullJSON
	spannerTypePGNumeric
	spannerTypePGJsonB
	spannerTypeArrayOfNonNullString
	spannerTypeArrayOfByteArray
	spannerTypeArrayOfNonNullInt64
	spannerTypeArrayOfNonNullBool
	spannerTypeArrayOfNonNullFloat64
	spannerTypeArrayOfNonNullFloat32
	spannerTypeArrayOfNonNullNumeric
	spannerTypeArrayOfNonNullTime
	spannerTypeArrayOfNonNullDate
	spannerTypeArrayOfNullString
	spannerTypeArrayOfNullInt64
	spannerTypeArrayOfNullBool
	spannerTypeArrayOfNullFloat64
	spannerTypeArrayOfNullFloat32
	spannerTypeArrayOfNullNumeric
	spannerTypeArrayOfNullJSON
	spannerTypeArrayOfNullTime
	spannerTypeArrayOfNullDate
	spannerTypeArrayOfPGNumeric
	spannerTypeArrayOfPGJsonB
)
-
-// supportsNull returns true for the Go types that can hold a null value from
-// Spanner.
-func (d decodableSpannerType) supportsNull() bool {
- switch d {
- case spannerTypeNonNullString, spannerTypeNonNullInt64, spannerTypeNonNullBool, spannerTypeNonNullFloat64, spannerTypeNonNullFloat32, spannerTypeNonNullTime, spannerTypeNonNullDate, spannerTypeNonNullNumeric:
- return false
- default:
- return true
- }
-}
-
-// The following list of types represent the struct types that represent a
-// specific Spanner data type in Go. If a pointer to one of these types is
-// passed to decodeValue, the client library will decode one column value into
-// the struct. For pointers to all other struct types, the client library will
-// treat it as a generic struct that should contain a field for each column in
-// the result set that is being decoded.
-
-var typeOfNonNullTime = reflect.TypeOf(time.Time{})
-var typeOfNonNullDate = reflect.TypeOf(civil.Date{})
-var typeOfNonNullNumeric = reflect.TypeOf(big.Rat{})
-var typeOfNullString = reflect.TypeOf(NullString{})
-var typeOfNullInt64 = reflect.TypeOf(NullInt64{})
-var typeOfNullBool = reflect.TypeOf(NullBool{})
-var typeOfNullFloat64 = reflect.TypeOf(NullFloat64{})
-var typeOfNullFloat32 = reflect.TypeOf(NullFloat32{})
-var typeOfNullTime = reflect.TypeOf(NullTime{})
-var typeOfNullDate = reflect.TypeOf(NullDate{})
-var typeOfNullNumeric = reflect.TypeOf(NullNumeric{})
-var typeOfNullJSON = reflect.TypeOf(NullJSON{})
-var typeOfPGNumeric = reflect.TypeOf(PGNumeric{})
-var typeOfPGJsonB = reflect.TypeOf(PGJsonB{})
-
// getDecodableSpannerType returns the corresponding decodableSpannerType of
// the given pointer.
//
// ptr is the destination value; when isPtr is true it is dereferenced first.
// Dispatch is on the reflect.Kind of the (indirected) value, followed by
// ConvertibleTo checks against the known Spanner wrapper structs. The order
// of the ConvertibleTo checks is significant: some wrapper types are
// mutually convertible, so earlier checks take precedence.
func getDecodableSpannerType(ptr interface{}, isPtr bool) decodableSpannerType {
	var val reflect.Value
	var kind reflect.Kind
	if isPtr {
		val = reflect.Indirect(reflect.ValueOf(ptr))
	} else {
		val = reflect.ValueOf(ptr)
	}
	kind = val.Kind()
	if kind == reflect.Invalid {
		return spannerTypeInvalid
	}
	switch kind {
	case reflect.Invalid:
		// NOTE(review): unreachable — the Invalid kind is already handled by
		// the check directly above the switch.
		return spannerTypeInvalid
	case reflect.String:
		return spannerTypeNonNullString
	case reflect.Int64:
		return spannerTypeNonNullInt64
	case reflect.Bool:
		return spannerTypeNonNullBool
	case reflect.Float32:
		return spannerTypeNonNullFloat32
	case reflect.Float64:
		return spannerTypeNonNullFloat64
	case reflect.Ptr:
		t := val.Type()
		if t.ConvertibleTo(typeOfNullNumeric) {
			return spannerTypeNullNumeric
		}
		if t.ConvertibleTo(typeOfNullJSON) {
			return spannerTypeNullJSON
		}
		if t.ConvertibleTo(typeOfPGJsonB) {
			return spannerTypePGJsonB
		}
	case reflect.Struct:
		t := val.Type()
		if t.ConvertibleTo(typeOfNonNullNumeric) {
			return spannerTypeNonNullNumeric
		}
		if t.ConvertibleTo(typeOfNonNullTime) {
			return spannerTypeNonNullTime
		}
		if t.ConvertibleTo(typeOfNonNullDate) {
			return spannerTypeNonNullDate
		}
		if t.ConvertibleTo(typeOfNullString) {
			return spannerTypeNullString
		}
		if t.ConvertibleTo(typeOfNullInt64) {
			return spannerTypeNullInt64
		}
		if t.ConvertibleTo(typeOfNullBool) {
			return spannerTypeNullBool
		}
		if t.ConvertibleTo(typeOfNullFloat64) {
			return spannerTypeNullFloat64
		}
		if t.ConvertibleTo(typeOfNullFloat32) {
			return spannerTypeNullFloat32
		}
		if t.ConvertibleTo(typeOfNullTime) {
			return spannerTypeNullTime
		}
		if t.ConvertibleTo(typeOfNullDate) {
			return spannerTypeNullDate
		}
		if t.ConvertibleTo(typeOfNullNumeric) {
			return spannerTypeNullNumeric
		}
		if t.ConvertibleTo(typeOfNullJSON) {
			return spannerTypeNullJSON
		}
		if t.ConvertibleTo(typeOfPGNumeric) {
			return spannerTypePGNumeric
		}
		if t.ConvertibleTo(typeOfPGJsonB) {
			return spannerTypePGJsonB
		}
	case reflect.Slice:
		// Dispatch on the element kind of the slice.
		kind := val.Type().Elem().Kind()
		switch kind {
		case reflect.Invalid:
			return spannerTypeUnknown
		case reflect.String:
			return spannerTypeArrayOfNonNullString
		case reflect.Uint8:
			// A []uint8 ([]byte) maps to the BYTES scalar type, not an array.
			return spannerTypeByteArray
		case reflect.Int64:
			return spannerTypeArrayOfNonNullInt64
		case reflect.Bool:
			return spannerTypeArrayOfNonNullBool
		case reflect.Float64:
			return spannerTypeArrayOfNonNullFloat64
		case reflect.Float32:
			return spannerTypeArrayOfNonNullFloat32
		case reflect.Ptr:
			t := val.Type().Elem()
			if t.ConvertibleTo(typeOfNullNumeric) {
				return spannerTypeArrayOfNullNumeric
			}
		case reflect.Struct:
			t := val.Type().Elem()
			if t.ConvertibleTo(typeOfNonNullNumeric) {
				return spannerTypeArrayOfNonNullNumeric
			}
			if t.ConvertibleTo(typeOfNonNullTime) {
				return spannerTypeArrayOfNonNullTime
			}
			if t.ConvertibleTo(typeOfNonNullDate) {
				return spannerTypeArrayOfNonNullDate
			}
			if t.ConvertibleTo(typeOfNullString) {
				return spannerTypeArrayOfNullString
			}
			if t.ConvertibleTo(typeOfNullInt64) {
				return spannerTypeArrayOfNullInt64
			}
			if t.ConvertibleTo(typeOfNullBool) {
				return spannerTypeArrayOfNullBool
			}
			if t.ConvertibleTo(typeOfNullFloat64) {
				return spannerTypeArrayOfNullFloat64
			}
			if t.ConvertibleTo(typeOfNullFloat32) {
				return spannerTypeArrayOfNullFloat32
			}
			if t.ConvertibleTo(typeOfNullTime) {
				return spannerTypeArrayOfNullTime
			}
			if t.ConvertibleTo(typeOfNullDate) {
				return spannerTypeArrayOfNullDate
			}
			if t.ConvertibleTo(typeOfNullNumeric) {
				return spannerTypeArrayOfNullNumeric
			}
			if t.ConvertibleTo(typeOfNullJSON) {
				return spannerTypeArrayOfNullJSON
			}
			if t.ConvertibleTo(typeOfPGNumeric) {
				return spannerTypeArrayOfPGNumeric
			}
			if t.ConvertibleTo(typeOfPGJsonB) {
				return spannerTypeArrayOfPGJsonB
			}
		case reflect.Slice:
			// The only array-of-array type that is supported is [][]byte.
			kind := val.Type().Elem().Elem().Kind()
			switch kind {
			case reflect.Uint8:
				return spannerTypeArrayOfByteArray
			}
		}
	}
	// Not convertible to a known base type.
	return spannerTypeUnknown
}
-
// decodeValueToCustomType decodes a protobuf Value into a pointer to a Go
// value. It must be possible to convert the value to the type pointed to by
// the pointer.
//
// v is the wire value and t its Spanner type; acode/atypeAnnotation describe
// the element type when t is an ARRAY. dsc selects the decode strategy. The
// decoded value is first built into an intermediate (result) and finally
// converted via reflection into *ptr, which is what allows custom types that
// are merely convertible to the base Go types to be supported.
func (dsc decodableSpannerType) decodeValueToCustomType(v *proto3.Value, t *sppb.Type, acode sppb.TypeCode, atypeAnnotation sppb.TypeAnnotationCode, ptr interface{}) error {
	code := t.Code
	typeAnnotation := t.TypeAnnotation
	_, isNull := v.Kind.(*proto3.Value_NullValue)
	if dsc == spannerTypeInvalid {
		return errNilDst(ptr)
	}
	if isNull && !dsc.supportsNull() {
		// Non-null destination variants cannot represent NULL; fail early so
		// the cases below only observe NULL for nullable destinations.
		return errDstNotForNull(ptr)
	}

	// result holds a pointer to the decoded value; it is converted into the
	// destination's type at the end of the function.
	var result interface{}
	switch dsc {
	case spannerTypeNonNullString, spannerTypeNullString:
		if code != sppb.TypeCode_STRING {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullString{}
			break
		}
		x, err := getStringValue(v)
		if err != nil {
			return err
		}
		if dsc == spannerTypeNonNullString {
			result = &x
		} else {
			result = &NullString{x, !isNull}
		}
	case spannerTypeByteArray:
		if code != sppb.TypeCode_BYTES {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = []byte(nil)
			break
		}
		x, err := getStringValue(v)
		if err != nil {
			return err
		}
		// BYTES values arrive base64-encoded on the wire.
		y, err := base64.StdEncoding.DecodeString(x)
		if err != nil {
			return errBadEncoding(v, err)
		}
		result = y
	case spannerTypeNonNullInt64, spannerTypeNullInt64:
		if code != sppb.TypeCode_INT64 {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullInt64{}
			break
		}
		// INT64 values are encoded as decimal strings on the wire.
		x, err := getStringValue(v)
		if err != nil {
			return err
		}
		y, err := strconv.ParseInt(x, 10, 64)
		if err != nil {
			return errBadEncoding(v, err)
		}
		if dsc == spannerTypeNonNullInt64 {
			result = &y
		} else {
			result = &NullInt64{y, !isNull}
		}
	case spannerTypeNonNullBool, spannerTypeNullBool:
		if code != sppb.TypeCode_BOOL {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullBool{}
			break
		}
		x, err := getBoolValue(v)
		if err != nil {
			return err
		}
		if dsc == spannerTypeNonNullBool {
			result = &x
		} else {
			result = &NullBool{x, !isNull}
		}
	case spannerTypeNonNullFloat64, spannerTypeNullFloat64:
		if code != sppb.TypeCode_FLOAT64 {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullFloat64{}
			break
		}
		x, err := getFloat64Value(v)
		if err != nil {
			return err
		}
		if dsc == spannerTypeNonNullFloat64 {
			result = &x
		} else {
			result = &NullFloat64{x, !isNull}
		}
	case spannerTypeNonNullFloat32, spannerTypeNullFloat32:
		if code != sppb.TypeCode_FLOAT32 {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullFloat32{}
			break
		}
		x, err := getFloat32Value(v)
		if err != nil {
			return err
		}
		if dsc == spannerTypeNonNullFloat32 {
			result = &x
		} else {
			result = &NullFloat32{x, !isNull}
		}
	case spannerTypeNonNullNumeric, spannerTypeNullNumeric:
		if code != sppb.TypeCode_NUMERIC {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullNumeric{}
			break
		}
		x := v.GetStringValue()
		y, ok := (&big.Rat{}).SetString(x)
		if !ok {
			return errUnexpectedNumericStr(x)
		}
		if dsc == spannerTypeNonNullNumeric {
			result = y
		} else {
			result = &NullNumeric{*y, true}
		}
	case spannerTypePGNumeric:
		if code != sppb.TypeCode_NUMERIC || typeAnnotation != sppb.TypeAnnotationCode_PG_NUMERIC {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &PGNumeric{}
			break
		}
		result = &PGNumeric{v.GetStringValue(), true}
	case spannerTypeNullJSON:
		if code != sppb.TypeCode_JSON {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullJSON{}
			break
		}
		x := v.GetStringValue()
		var y interface{}
		err := jsonUnmarshal([]byte(x), &y)
		if err != nil {
			return err
		}
		result = &NullJSON{y, true}
	case spannerTypePGJsonB:
		if code != sppb.TypeCode_JSON || typeAnnotation != sppb.TypeAnnotationCode_PG_JSONB {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &PGJsonB{}
			break
		}
		x := v.GetStringValue()
		var y interface{}
		err := jsonUnmarshal([]byte(x), &y)
		if err != nil {
			return err
		}
		result = &PGJsonB{Value: y, Valid: true}
	case spannerTypeNonNullTime, spannerTypeNullTime:
		var nt NullTime
		err := parseNullTime(v, &nt, code, isNull)
		if err != nil {
			return err
		}
		if dsc == spannerTypeNonNullTime {
			result = &nt.Time
		} else {
			result = &nt
		}
	case spannerTypeNonNullDate, spannerTypeNullDate:
		if code != sppb.TypeCode_DATE {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			result = &NullDate{}
			break
		}
		x, err := getStringValue(v)
		if err != nil {
			return err
		}
		y, err := civil.ParseDate(x)
		if err != nil {
			return errBadEncoding(v, err)
		}
		if dsc == spannerTypeNonNullDate {
			result = &y
		} else {
			result = &NullDate{y, !isNull}
		}
	case spannerTypeArrayOfNonNullString, spannerTypeArrayOfNullString:
		if acode != sppb.TypeCode_STRING {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			// NOTE(review): assigning nil to the local variable ptr does not
			// modify the caller's destination; a NULL array therefore leaves
			// the destination unchanged (the same pattern repeats in every
			// array case below) — confirm this is intended.
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, stringType(), "STRING")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfByteArray:
		if acode != sppb.TypeCode_BYTES {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, bytesType(), "BYTES")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullInt64, spannerTypeArrayOfNullInt64:
		if acode != sppb.TypeCode_INT64 {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, intType(), "INT64")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullBool, spannerTypeArrayOfNullBool:
		if acode != sppb.TypeCode_BOOL {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, boolType(), "BOOL")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullFloat32, spannerTypeArrayOfNullFloat32:
		if acode != sppb.TypeCode_FLOAT32 {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, float32Type(), "FLOAT32")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullFloat64, spannerTypeArrayOfNullFloat64:
		if acode != sppb.TypeCode_FLOAT64 {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, floatType(), "FLOAT64")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullNumeric, spannerTypeArrayOfNullNumeric:
		if acode != sppb.TypeCode_NUMERIC {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, numericType(), "NUMERIC")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfPGNumeric:
		if acode != sppb.TypeCode_NUMERIC || atypeAnnotation != sppb.TypeAnnotationCode_PG_NUMERIC {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, pgNumericType(), "PGNUMERIC")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNullJSON:
		if acode != sppb.TypeCode_JSON {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, jsonType(), "JSON")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfPGJsonB:
		if acode != sppb.TypeCode_JSON || atypeAnnotation != sppb.TypeAnnotationCode_PG_JSONB {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, pgJsonbType(), "PGJSONB")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullTime, spannerTypeArrayOfNullTime:
		if acode != sppb.TypeCode_TIMESTAMP {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, timeType(), "TIMESTAMP")
		if err != nil {
			return err
		}
		result = y
	case spannerTypeArrayOfNonNullDate, spannerTypeArrayOfNullDate:
		if acode != sppb.TypeCode_DATE {
			return errTypeMismatch(code, acode, ptr)
		}
		if isNull {
			ptr = nil
			return nil
		}
		x, err := getListValue(v)
		if err != nil {
			return err
		}
		y, err := decodeGenericArray(reflect.TypeOf(ptr).Elem(), x, dateType(), "DATE")
		if err != nil {
			return err
		}
		result = y
	default:
		// This should not be possible.
		return fmt.Errorf("unknown decodable type found: %v", dsc)
	}
	// Convert the intermediate result into the caller's (possibly custom)
	// destination type.
	source := reflect.Indirect(reflect.ValueOf(result))
	destination := reflect.Indirect(reflect.ValueOf(ptr))
	destination.Set(source.Convert(destination.Type()))
	return nil
}
-
// errSrcVal returns an error for getting a wrong source protobuf value in decoding.
func errSrcVal(v *proto3.Value, want string) error {
	return spannerErrorf(codes.FailedPrecondition, "cannot use %v(Kind: %T) as %s Value",
		v, v.GetKind(), want)
}
-
-// getIntegerFromStringValue returns the integer value of the string value encoded in proto3.Value v
-func getIntegerFromStringValue(v *proto3.Value) (int64, error) {
- x, err := getStringValue(v)
- if err != nil {
- return 0, err
- }
- y, err := strconv.ParseInt(x, 10, 64)
- if err != nil {
- return 0, errBadEncoding(v, err)
- }
- return y, nil
-}
-
-// getBytesFromStringValue returns the bytes value of the string value encoded in proto3.Value v
-func getBytesFromStringValue(v *proto3.Value) ([]byte, error) {
- x, err := getStringValue(v)
- if err != nil {
- return nil, err
- }
- y, err := base64.StdEncoding.DecodeString(x)
- if err != nil {
- return nil, errBadEncoding(v, err)
- }
- return y, nil
-}
-
-// getStringValue returns the string value encoded in proto3.Value v whose
-// kind is proto3.Value_StringValue.
-func getStringValue(v *proto3.Value) (string, error) {
- if x, ok := v.GetKind().(*proto3.Value_StringValue); ok && x != nil {
- return x.StringValue, nil
- }
- return "", errSrcVal(v, "String")
-}
-
-// getBoolValue returns the bool value encoded in proto3.Value v whose
-// kind is proto3.Value_BoolValue.
-func getBoolValue(v *proto3.Value) (bool, error) {
- if x, ok := v.GetKind().(*proto3.Value_BoolValue); ok && x != nil {
- return x.BoolValue, nil
- }
- return false, errSrcVal(v, "Bool")
-}
-
-// getListValue returns the proto3.ListValue contained in proto3.Value v whose
-// kind is proto3.Value_ListValue.
-func getListValue(v *proto3.Value) (*proto3.ListValue, error) {
- if x, ok := v.GetKind().(*proto3.Value_ListValue); ok && x != nil {
- return x.ListValue, nil
- }
- return nil, errSrcVal(v, "List")
-}
-
// getGenericValue returns the interface{} value encoded in proto3.Value.
// NULL values are mapped to a typed nil pointer chosen from t (see
// getTypedNil), so callers can still distinguish the column type of a NULL.
func getGenericValue(t *sppb.Type, v *proto3.Value) (interface{}, error) {
	switch x := v.GetKind().(type) {
	case *proto3.Value_NumberValue:
		return x.NumberValue, nil
	case *proto3.Value_BoolValue:
		return x.BoolValue, nil
	case *proto3.Value_StringValue:
		return x.StringValue, nil
	case *proto3.Value_ListValue:
		return x.ListValue, nil
	case *proto3.Value_NullValue:
		return getTypedNil(t)
	default:
		return 0, errSrcVal(v, "Number, Bool, String, List")
	}
}
-
-func getTypedNil(t *sppb.Type) (interface{}, error) {
- switch t.Code {
- case sppb.TypeCode_FLOAT64:
- var f *float64
- return f, nil
- case sppb.TypeCode_BOOL:
- var b *bool
- return b, nil
- default:
- // The encoding for most types is string, except for the ones listed
- // above.
- var s *string
- return s, nil
- }
-}
-
// errUnexpectedNumericStr returns error for decoder getting an unexpected
// string for representing special numeric values.
func errUnexpectedNumericStr(s string) error {
	return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for numeric number", s)
}
-
// errUnexpectedFloat64Str returns error for decoder getting an unexpected
// string for representing special float values (only "NaN", "Infinity" and
// "-Infinity" are valid string encodings for FLOAT64).
func errUnexpectedFloat64Str(s string) error {
	return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for float64 number", s)
}
-
-// getFloat64Value returns the float64 value encoded in proto3.Value v whose
-// kind is proto3.Value_NumberValue / proto3.Value_StringValue.
-// Cloud Spanner uses string to encode NaN, Infinity and -Infinity.
-func getFloat64Value(v *proto3.Value) (float64, error) {
- switch x := v.GetKind().(type) {
- case *proto3.Value_NumberValue:
- if x == nil {
- break
- }
- return x.NumberValue, nil
- case *proto3.Value_StringValue:
- if x == nil {
- break
- }
- switch x.StringValue {
- case "NaN":
- return math.NaN(), nil
- case "Infinity":
- return math.Inf(1), nil
- case "-Infinity":
- return math.Inf(-1), nil
- default:
- return 0, errUnexpectedFloat64Str(x.StringValue)
- }
- }
- return 0, errSrcVal(v, "Number")
-}
-
// errUnexpectedFloat32Str returns error for decoder getting an unexpected
// string for representing special float values (only "NaN", "Infinity" and
// "-Infinity" are valid string encodings for FLOAT32).
func errUnexpectedFloat32Str(s string) error {
	return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for float32 number", s)
}
-
-// getFloat32Value returns the float32 value encoded in proto3.Value v whose
-// kind is proto3.Value_NumberValue / proto3.Value_StringValue.
-// Cloud Spanner uses string to encode NaN, Infinity and -Infinity.
-func getFloat32Value(v *proto3.Value) (float32, error) {
- switch x := v.GetKind().(type) {
- case *proto3.Value_NumberValue:
- if x == nil {
- break
- }
- return float32(x.NumberValue), nil
- case *proto3.Value_StringValue:
- if x == nil {
- break
- }
- switch x.StringValue {
- case "NaN":
- return float32(math.NaN()), nil
- case "Infinity":
- return float32(math.Inf(1)), nil
- case "-Infinity":
- return float32(math.Inf(-1)), nil
- default:
- return 0, errUnexpectedFloat32Str(x.StringValue)
- }
- }
- return 0, errSrcVal(v, "Number")
-}
-
// errNilListValue returns error for unexpected nil ListValue in decoding Cloud Spanner ARRAYs.
// sqlType is the Spanner type name used in the error message.
func errNilListValue(sqlType string) error {
	return spannerErrorf(codes.FailedPrecondition, "unexpected nil ListValue in decoding %v array", sqlType)
}
-
// errDecodeArrayElement returns error for failure in decoding single array element.
// If err is already a spanner *Error it is decorated with the element context;
// otherwise a new error with codes.Unknown is created.
func errDecodeArrayElement(i int, v proto.Message, sqlType string, err error) error {
	var se *Error
	if !errorAs(err, &se) {
		return spannerErrorf(codes.Unknown,
			"cannot decode %v(array element %v) as %v, error = <%v>", v, i, sqlType, err)
	}
	se.decorate(fmt.Sprintf("cannot decode %v(array element %v) as %v", v, i, sqlType))
	return se
}
-
-// decodeGenericArray decodes proto3.ListValue pb into a slice which type is
-// determined through reflection.
-func decodeGenericArray(tp reflect.Type, pb *proto3.ListValue, t *sppb.Type, sqlType string) (interface{}, error) {
- if pb == nil {
- return nil, errNilListValue(sqlType)
- }
- a := reflect.MakeSlice(tp, len(pb.Values), len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, t, a.Index(i).Addr().Interface()); err != nil {
- return nil, errDecodeArrayElement(i, v, "STRING", err)
- }
- }
- return a.Interface(), nil
-}
-
-// decodeNullStringArray decodes proto3.ListValue pb into a NullString slice.
-func decodeNullStringArray(pb *proto3.ListValue) ([]NullString, error) {
- if pb == nil {
- return nil, errNilListValue("STRING")
- }
- a := make([]NullString, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, stringType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "STRING", err)
- }
- }
- return a, nil
-}
-
-// decodeStringPointerArray decodes proto3.ListValue pb into a *string slice.
-func decodeStringPointerArray(pb *proto3.ListValue) ([]*string, error) {
- if pb == nil {
- return nil, errNilListValue("STRING")
- }
- a := make([]*string, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, stringType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "STRING", err)
- }
- }
- return a, nil
-}
-
-// decodeStringArray decodes proto3.ListValue pb into a string slice.
-func decodeStringArray(pb *proto3.ListValue) ([]string, error) {
- if pb == nil {
- return nil, errNilListValue("STRING")
- }
- a := make([]string, len(pb.Values))
- st := stringType()
- for i, v := range pb.Values {
- if err := decodeValue(v, st, &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "STRING", err)
- }
- }
- return a, nil
-}
-
-// decodeNullInt64Array decodes proto3.ListValue pb into a NullInt64 slice.
-func decodeNullInt64Array(pb *proto3.ListValue) ([]NullInt64, error) {
- if pb == nil {
- return nil, errNilListValue("INT64")
- }
- a := make([]NullInt64, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, intType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "INT64", err)
- }
- }
- return a, nil
-}
-
-// decodeInt64PointerArray decodes proto3.ListValue pb into a *int64 slice.
-func decodeInt64PointerArray(pb *proto3.ListValue) ([]*int64, error) {
- if pb == nil {
- return nil, errNilListValue("INT64")
- }
- a := make([]*int64, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, intType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "INT64", err)
- }
- }
- return a, nil
-}
-
-// decodeInt64Array decodes proto3.ListValue pb into a int64 slice.
-func decodeInt64Array(pb *proto3.ListValue) ([]int64, error) {
- if pb == nil {
- return nil, errNilListValue("INT64")
- }
- a := make([]int64, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, intType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "INT64", err)
- }
- }
- return a, nil
-}
-
-// decodeNullBoolArray decodes proto3.ListValue pb into a NullBool slice.
-func decodeNullBoolArray(pb *proto3.ListValue) ([]NullBool, error) {
- if pb == nil {
- return nil, errNilListValue("BOOL")
- }
- a := make([]NullBool, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, boolType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "BOOL", err)
- }
- }
- return a, nil
-}
-
-// decodeBoolPointerArray decodes proto3.ListValue pb into a *bool slice.
-func decodeBoolPointerArray(pb *proto3.ListValue) ([]*bool, error) {
- if pb == nil {
- return nil, errNilListValue("BOOL")
- }
- a := make([]*bool, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, boolType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "BOOL", err)
- }
- }
- return a, nil
-}
-
-// decodeBoolArray decodes proto3.ListValue pb into a bool slice.
-func decodeBoolArray(pb *proto3.ListValue) ([]bool, error) {
- if pb == nil {
- return nil, errNilListValue("BOOL")
- }
- a := make([]bool, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, boolType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "BOOL", err)
- }
- }
- return a, nil
-}
-
-// decodeNullFloat64Array decodes proto3.ListValue pb into a NullFloat64 slice.
-func decodeNullFloat64Array(pb *proto3.ListValue) ([]NullFloat64, error) {
- if pb == nil {
- return nil, errNilListValue("FLOAT64")
- }
- a := make([]NullFloat64, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, floatType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "FLOAT64", err)
- }
- }
- return a, nil
-}
-
-// decodeFloat64PointerArray decodes proto3.ListValue pb into a *float slice.
-func decodeFloat64PointerArray(pb *proto3.ListValue) ([]*float64, error) {
- if pb == nil {
- return nil, errNilListValue("FLOAT64")
- }
- a := make([]*float64, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, floatType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "FLOAT64", err)
- }
- }
- return a, nil
-}
-
-// decodeFloat64Array decodes proto3.ListValue pb into a float64 slice.
-func decodeFloat64Array(pb *proto3.ListValue) ([]float64, error) {
- if pb == nil {
- return nil, errNilListValue("FLOAT64")
- }
- a := make([]float64, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, floatType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "FLOAT64", err)
- }
- }
- return a, nil
-}
-
-// decodeNullFloat32Array decodes proto3.ListValue pb into a NullFloat32 slice.
-func decodeNullFloat32Array(pb *proto3.ListValue) ([]NullFloat32, error) {
- if pb == nil {
- return nil, errNilListValue("FLOAT32")
- }
- a := make([]NullFloat32, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, float32Type(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "FLOAT32", err)
- }
- }
- return a, nil
-}
-
-// decodeFloat32PointerArray decodes proto3.ListValue pb into a *float32 slice.
-func decodeFloat32PointerArray(pb *proto3.ListValue) ([]*float32, error) {
- if pb == nil {
- return nil, errNilListValue("FLOAT32")
- }
- a := make([]*float32, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, float32Type(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "FLOAT32", err)
- }
- }
- return a, nil
-}
-
-// decodeFloat32Array decodes proto3.ListValue pb into a float32 slice.
-func decodeFloat32Array(pb *proto3.ListValue) ([]float32, error) {
- if pb == nil {
- return nil, errNilListValue("FLOAT32")
- }
- a := make([]float32, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, float32Type(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "FLOAT32", err)
- }
- }
- return a, nil
-}
-
-// decodeNullNumericArray decodes proto3.ListValue pb into a NullNumeric slice.
-func decodeNullNumericArray(pb *proto3.ListValue) ([]NullNumeric, error) {
- if pb == nil {
- return nil, errNilListValue("NUMERIC")
- }
- a := make([]NullNumeric, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, numericType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "NUMERIC", err)
- }
- }
- return a, nil
-}
-
-// decodeNullJSONArray decodes proto3.ListValue pb into a NullJSON slice.
-func decodeNullJSONArray(pb *proto3.ListValue) ([]NullJSON, error) {
- if pb == nil {
- return nil, errNilListValue("JSON")
- }
- a := make([]NullJSON, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, jsonType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "JSON", err)
- }
- }
- return a, nil
-}
-
-// decodeJsonBArray decodes proto3.ListValue pb into a JsonB slice.
-func decodePGJsonBArray(pb *proto3.ListValue) ([]PGJsonB, error) {
- if pb == nil {
- return nil, errNilListValue("PGJSONB")
- }
- a := make([]PGJsonB, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, pgJsonbType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "PGJSONB", err)
- }
- }
- return a, nil
-}
-
-// decodeNullJSONArray decodes proto3.ListValue pb into a NullJSON pointer.
-func decodeNullJSONArrayToNullJSON(pb *proto3.ListValue) (*NullJSON, error) {
- if pb == nil {
- return nil, errNilListValue("JSON")
- }
- strs := []string{}
- for _, v := range pb.Values {
- if _, ok := v.Kind.(*proto3.Value_NullValue); ok {
- strs = append(strs, "null")
- } else {
- strs = append(strs, v.GetStringValue())
- }
- }
- s := fmt.Sprintf("[%s]", strings.Join(strs, ","))
- var y interface{}
- err := jsonUnmarshal([]byte(s), &y)
- if err != nil {
- return nil, err
- }
- return &NullJSON{y, true}, nil
-}
-
-// decodeNumericPointerArray decodes proto3.ListValue pb into a *big.Rat slice.
-func decodeNumericPointerArray(pb *proto3.ListValue) ([]*big.Rat, error) {
- if pb == nil {
- return nil, errNilListValue("NUMERIC")
- }
- a := make([]*big.Rat, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, numericType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "NUMERIC", err)
- }
- }
- return a, nil
-}
-
-// decodeNumericArray decodes proto3.ListValue pb into a big.Rat slice.
-func decodeNumericArray(pb *proto3.ListValue) ([]big.Rat, error) {
- if pb == nil {
- return nil, errNilListValue("NUMERIC")
- }
- a := make([]big.Rat, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, numericType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "NUMERIC", err)
- }
- }
- return a, nil
-}
-
-// decodePGNumericArray decodes proto3.ListValue pb into a PGNumeric slice.
-func decodePGNumericArray(pb *proto3.ListValue) ([]PGNumeric, error) {
- if pb == nil {
- return nil, errNilListValue("PGNUMERIC")
- }
- a := make([]PGNumeric, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, pgNumericType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "PGNUMERIC", err)
- }
- }
- return a, nil
-}
-
-// decodeByteArray decodes proto3.ListValue pb into a slice of byte slice.
-func decodeByteArray(pb *proto3.ListValue) ([][]byte, error) {
- if pb == nil {
- return nil, errNilListValue("BYTES")
- }
- a := make([][]byte, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, bytesType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "BYTES", err)
- }
- }
- return a, nil
-}
-
-// decodeProtoMessagePtrArray decodes proto3.ListValue pb into a *proto.Message slice.
-// The elements in the array implements proto.Message interface only if the element is a pointer (e.g. *ProtoMessage).
-// However, if the element is a value (e.g. ProtoMessage), then it does not implement proto.Message.
-// Therefore, decodeProtoMessagePtrArray allows decoding of proto message array if the array element is a pointer only.
-func decodeProtoMessagePtrArray(pb *proto3.ListValue, t *sppb.Type, rv reflect.Value) error {
- if pb == nil {
- return errNilListValue("PROTO")
- }
- etyp := rv.Type().Elem().Elem().Elem()
- a := reflect.MakeSlice(rv.Type().Elem(), len(pb.Values), len(pb.Values))
- for i, v := range pb.Values {
- _, isNull := v.Kind.(*proto3.Value_NullValue)
- if isNull {
- continue
- }
- msg := reflect.New(etyp).Interface().(proto.Message)
- if err := decodeValue(v, t, msg); err != nil {
- return errDecodeArrayElement(i, v, "PROTO", err)
- }
- a.Index(i).Set(reflect.ValueOf(msg))
- }
- rv.Elem().Set(a)
- return nil
-}
-
-// decodeProtoEnumPtrArray decodes proto3.ListValue pb into a *protoreflect.Enum slice.
-func decodeProtoEnumPtrArray(pb *proto3.ListValue, t *sppb.Type, rv reflect.Value) error {
- if pb == nil {
- return errNilListValue("ENUM")
- }
- etyp := rv.Type().Elem().Elem().Elem()
- a := reflect.MakeSlice(rv.Type().Elem(), len(pb.Values), len(pb.Values))
- for i, v := range pb.Values {
- _, isNull := v.Kind.(*proto3.Value_NullValue)
- if isNull {
- continue
- }
- enum := reflect.New(etyp).Interface().(protoreflect.Enum)
- if err := decodeValue(v, t, enum); err != nil {
- return errDecodeArrayElement(i, v, "ENUM", err)
- }
- a.Index(i).Set(reflect.ValueOf(enum))
- }
- rv.Elem().Set(a)
- return nil
-}
-
-// decodeProtoEnumArray decodes proto3.ListValue pb into a protoreflect.Enum slice.
-func decodeProtoEnumArray(pb *proto3.ListValue, t *sppb.Type, rv reflect.Value, ptr interface{}) error {
- if pb == nil {
- return errNilListValue("ENUM")
- }
- a := reflect.MakeSlice(rv.Type().Elem(), len(pb.Values), len(pb.Values))
- // decodeValue method can decode only if ENUM is a pointer type.
- // As the ENUM element in the Array is not a pointer type we cannot use decodeValue method
- // and hence handle it separately.
- for i, v := range pb.Values {
- _, isNull := v.Kind.(*proto3.Value_NullValue)
- // As the ENUM elements in the array are value type and not pointer type,
- // we cannot support NULL values in the array
- if isNull {
- return errNilNotAllowed(ptr, "*[]*protoreflect.Enum")
- }
- x, err := getStringValue(v)
- if err != nil {
- return err
- }
- y, err := strconv.ParseInt(x, 10, 64)
- if err != nil {
- return errBadEncoding(v, err)
- }
- a.Index(i).SetInt(y)
- }
- rv.Elem().Set(a)
- return nil
-}
-
-// decodeNullTimeArray decodes proto3.ListValue pb into a NullTime slice.
-func decodeNullTimeArray(pb *proto3.ListValue) ([]NullTime, error) {
- if pb == nil {
- return nil, errNilListValue("TIMESTAMP")
- }
- a := make([]NullTime, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, timeType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err)
- }
- }
- return a, nil
-}
-
-// decodeTimePointerArray decodes proto3.ListValue pb into a NullTime slice.
-func decodeTimePointerArray(pb *proto3.ListValue) ([]*time.Time, error) {
- if pb == nil {
- return nil, errNilListValue("TIMESTAMP")
- }
- a := make([]*time.Time, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, timeType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err)
- }
- }
- return a, nil
-}
-
-// decodeTimeArray decodes proto3.ListValue pb into a time.Time slice.
-func decodeTimeArray(pb *proto3.ListValue) ([]time.Time, error) {
- if pb == nil {
- return nil, errNilListValue("TIMESTAMP")
- }
- a := make([]time.Time, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, timeType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err)
- }
- }
- return a, nil
-}
-
-// decodeNullDateArray decodes proto3.ListValue pb into a NullDate slice.
-func decodeNullDateArray(pb *proto3.ListValue) ([]NullDate, error) {
- if pb == nil {
- return nil, errNilListValue("DATE")
- }
- a := make([]NullDate, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, dateType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "DATE", err)
- }
- }
- return a, nil
-}
-
-// decodeDatePointerArray decodes proto3.ListValue pb into a *civil.Date slice.
-func decodeDatePointerArray(pb *proto3.ListValue) ([]*civil.Date, error) {
- if pb == nil {
- return nil, errNilListValue("DATE")
- }
- a := make([]*civil.Date, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, dateType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "DATE", err)
- }
- }
- return a, nil
-}
-
-// decodeDateArray decodes proto3.ListValue pb into a civil.Date slice.
-func decodeDateArray(pb *proto3.ListValue) ([]civil.Date, error) {
- if pb == nil {
- return nil, errNilListValue("DATE")
- }
- a := make([]civil.Date, len(pb.Values))
- for i, v := range pb.Values {
- if err := decodeValue(v, dateType(), &a[i]); err != nil {
- return nil, errDecodeArrayElement(i, v, "DATE", err)
- }
- }
- return a, nil
-}
-
-func errNotStructElement(i int, v *proto3.Value) error {
- return errDecodeArrayElement(i, v, "STRUCT",
- spannerErrorf(codes.FailedPrecondition, "%v(type: %T) doesn't encode Cloud Spanner STRUCT", v, v))
-}
-
-// decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to
-// the structural information given in sppb.StructType ty.
-func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue) ([]NullRow, error) {
- if pb == nil {
- return nil, errNilListValue("STRUCT")
- }
- a := make([]NullRow, len(pb.Values))
- for i := range pb.Values {
- switch v := pb.Values[i].GetKind().(type) {
- case *proto3.Value_ListValue:
- a[i] = NullRow{
- Row: Row{
- fields: ty.Fields,
- vals: v.ListValue.Values,
- },
- Valid: true,
- }
- // Null elements not currently supported by the server, see
- // https://cloud.google.com/spanner/docs/query-syntax#using-structs-with-select
- case *proto3.Value_NullValue:
- // no-op, a[i] is NullRow{} already
- default:
- return nil, errNotStructElement(i, pb.Values[i])
- }
- }
- return a, nil
-}
-
-// errNilSpannerStructType returns error for unexpected nil Cloud Spanner STRUCT
-// schema type in decoding.
-func errNilSpannerStructType() error {
- return spannerErrorf(codes.FailedPrecondition, "unexpected nil StructType in decoding Cloud Spanner STRUCT")
-}
-
-// errDupGoField returns error for duplicated Go STRUCT field names
-func errDupGoField(s interface{}, name string) error {
- return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has duplicate fields for GO STRUCT field %s", s, s, name)
-}
-
-// errUnnamedField returns error for decoding a Cloud Spanner STRUCT with
-// unnamed field into a Go struct.
-func errUnnamedField(ty *sppb.StructType, i int) error {
- return spannerErrorf(codes.InvalidArgument, "unnamed field %v in Cloud Spanner STRUCT %+v", i, ty)
-}
-
-// errNoOrDupGoField returns error for decoding a Cloud Spanner
-// STRUCT into a Go struct which is either missing a field, or has duplicate
-// fields.
-func errNoOrDupGoField(s interface{}, f string) error {
- return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has no or duplicate fields for Cloud Spanner STRUCT field %v", s, s, f)
-}
-
-// errDupColNames returns error for duplicated Cloud Spanner STRUCT field names
-// found in decoding a Cloud Spanner STRUCT into a Go struct.
-func errDupSpannerField(f string, ty *sppb.StructType) error {
- return spannerErrorf(codes.InvalidArgument, "duplicated field name %q in Cloud Spanner STRUCT %+v", f, ty)
-}
-
-// errDecodeStructField returns error for failure in decoding a single field of
-// a Cloud Spanner STRUCT.
-func errDecodeStructField(ty *sppb.StructType, f string, err error) error {
- var se *Error
- if !errorAs(err, &se) {
- return spannerErrorf(codes.Unknown,
- "cannot decode field %v of Cloud Spanner STRUCT %+v, error = <%v>", f, ty, err)
- }
- se.decorate(fmt.Sprintf("cannot decode field %v of Cloud Spanner STRUCT %+v", f, ty))
- return se
-}
-
-// decodeSetting contains all the settings for decoding from spanner struct
-type decodeSetting struct {
- Lenient bool
-}
-
-// DecodeOptions is the interface to change decode struct settings
-type DecodeOptions interface {
- Apply(s *decodeSetting)
-}
-
-type withLenient struct{ lenient bool }
-
-func (w withLenient) Apply(s *decodeSetting) {
- s.Lenient = w.lenient
-}
-
-// WithLenient returns a DecodeOptions that allows decoding into a struct with missing fields in database.
-func WithLenient() DecodeOptions {
- return withLenient{lenient: true}
-}
-
-// decodeStruct decodes proto3.ListValue pb into struct referenced by pointer
-// ptr, according to
-// the structural information given in sppb.StructType ty.
-func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}, lenient bool) error {
- if reflect.ValueOf(ptr).IsNil() {
- return errNilDst(ptr)
- }
- if ty == nil {
- return errNilSpannerStructType()
- }
- // t holds the structural information of ptr.
- t := reflect.TypeOf(ptr).Elem()
- // v is the actual value that ptr points to.
- v := reflect.ValueOf(ptr).Elem()
-
- fields, err := fieldCache.Fields(t)
- if err != nil {
- return ToSpannerError(err)
- }
- // return error if lenient is true and destination has duplicate exported columns
- if lenient {
- fieldNames := getAllFieldNames(v)
- for _, f := range fieldNames {
- if fields.Match(f) == nil {
- return errDupGoField(ptr, f)
- }
- }
- }
- seen := map[string]bool{}
- for i, f := range ty.Fields {
- if f.Name == "" {
- return errUnnamedField(ty, i)
- }
- sf := fields.Match(f.Name)
- if sf == nil {
- if lenient {
- continue
- }
- return errNoOrDupGoField(ptr, f.Name)
- }
- if seen[f.Name] {
- // We don't allow duplicated field name.
- return errDupSpannerField(f.Name, ty)
- }
- opts := []DecodeOptions{withLenient{lenient: lenient}}
- // Try to decode a single field.
- if err := decodeValue(pb.Values[i], f.Type, v.FieldByIndex(sf.Index).Addr().Interface(), opts...); err != nil {
- return errDecodeStructField(ty, f.Name, err)
- }
- // Mark field f.Name as processed.
- seen[f.Name] = true
- }
- return nil
-}
-
-// isPtrStructPtrSlice returns true if ptr is a pointer to a slice of struct pointers.
-func isPtrStructPtrSlice(t reflect.Type) bool {
- if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice {
- // t is not a pointer to a slice.
- return false
- }
- if t = t.Elem(); t.Elem().Kind() != reflect.Ptr || t.Elem().Elem().Kind() != reflect.Struct {
- // the slice that t points to is not a slice of struct pointers.
- return false
- }
- return true
-}
-
-// decodeStructArray decodes proto3.ListValue pb into struct slice referenced by
-// pointer ptr, according to the
-// structural information given in a sppb.StructType.
-func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}, lenient bool) error {
- if pb == nil {
- return errNilListValue("STRUCT")
- }
- // Type of the struct pointers stored in the slice that ptr points to.
- ts := reflect.TypeOf(ptr).Elem().Elem()
- // The slice that ptr points to, might be nil at this point.
- v := reflect.ValueOf(ptr).Elem()
- // Allocate empty slice.
- v.Set(reflect.MakeSlice(v.Type(), 0, len(pb.Values)))
- // Decode every struct in pb.Values.
- for i, pv := range pb.Values {
- // Check if pv is a NULL value.
- if _, isNull := pv.Kind.(*proto3.Value_NullValue); isNull {
- // Append a nil pointer to the slice.
- v.Set(reflect.Append(v, reflect.New(ts).Elem()))
- continue
- }
- // Allocate empty struct.
- s := reflect.New(ts.Elem())
- // Get proto3.ListValue l from proto3.Value pv.
- l, err := getListValue(pv)
- if err != nil {
- return errDecodeArrayElement(i, pv, "STRUCT", err)
- }
- // Decode proto3.ListValue l into struct referenced by s.Interface().
- if err = decodeStruct(ty, l, s.Interface(), lenient); err != nil {
- return errDecodeArrayElement(i, pv, "STRUCT", err)
- }
- // Append the decoded struct back into the slice.
- v.Set(reflect.Append(v, s))
- }
- return nil
-}
-
-func getAllFieldNames(v reflect.Value) []string {
- var names []string
- typeOfT := v.Type()
- for i := 0; i < v.NumField(); i++ {
- f := v.Field(i)
- fieldType := typeOfT.Field(i)
- exported := (fieldType.PkgPath == "")
- // If a named field is unexported, ignore it. An anonymous
- // unexported field is processed, because it may contain
- // exported fields, which are visible.
- if !exported && !fieldType.Anonymous {
- continue
- }
- if f.Kind() == reflect.Struct {
- if fieldType.Anonymous {
- names = append(names, getAllFieldNames(reflect.ValueOf(f.Interface()))...)
- }
- continue
- }
- name, keep, _, _ := spannerTagParser(fieldType.Tag)
- if !keep {
- continue
- }
- if name == "" {
- name = fieldType.Name
- }
- names = append(names, name)
- }
- return names
-}
-
-// errEncoderUnsupportedType returns error for not being able to encode a value
-// of certain type.
-func errEncoderUnsupportedType(v interface{}) error {
- return spannerErrorf(codes.InvalidArgument, "client doesn't support type %T", v)
-}
-
-// encodeValue encodes a Go native type into a proto3.Value.
-func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) {
- pb := &proto3.Value{
- Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE},
- }
- var pt *sppb.Type
- var err error
- switch v := v.(type) {
- case nil:
- case string:
- pb.Kind = stringKind(v)
- pt = stringType()
- case NullString:
- if v.Valid {
- return encodeValue(v.StringVal)
- }
- pt = stringType()
- case sql.NullString:
- if v.Valid {
- return encodeValue(v.String)
- }
- pt = stringType()
- case []string:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(stringType())
- case []NullString:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(stringType())
- case *string:
- if v != nil {
- return encodeValue(*v)
- }
- pt = stringType()
- case []*string:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(stringType())
- case []byte:
- if v != nil {
- pb.Kind = stringKind(base64.StdEncoding.EncodeToString(v))
- }
- pt = bytesType()
- case [][]byte:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(bytesType())
- case int:
- pb.Kind = stringKind(strconv.FormatInt(int64(v), 10))
- pt = intType()
- case []int:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(intType())
- case int64:
- pb.Kind = stringKind(strconv.FormatInt(v, 10))
- pt = intType()
- case []int64:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(intType())
- case NullInt64:
- if v.Valid {
- return encodeValue(v.Int64)
- }
- pt = intType()
- case []NullInt64:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(intType())
- case *int64:
- if v != nil {
- return encodeValue(*v)
- }
- pt = intType()
- case []*int64:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(intType())
- case bool:
- pb.Kind = &proto3.Value_BoolValue{BoolValue: v}
- pt = boolType()
- case []bool:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(boolType())
- case NullBool:
- if v.Valid {
- return encodeValue(v.Bool)
- }
- pt = boolType()
- case []NullBool:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(boolType())
- case *bool:
- if v != nil {
- return encodeValue(*v)
- }
- pt = boolType()
- case []*bool:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(boolType())
- case float64:
- pb.Kind = &proto3.Value_NumberValue{NumberValue: v}
- pt = floatType()
- case []float64:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(floatType())
- case NullFloat64:
- if v.Valid {
- return encodeValue(v.Float64)
- }
- pt = floatType()
- case []NullFloat64:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(floatType())
- case *float64:
- if v != nil {
- return encodeValue(*v)
- }
- pt = floatType()
- case []*float64:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(floatType())
- case float32:
- pb.Kind = &proto3.Value_NumberValue{NumberValue: float64(v)}
- pt = float32Type()
- case []float32:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(float32Type())
- case NullFloat32:
- if v.Valid {
- return encodeValue(v.Float32)
- }
- pt = float32Type()
- case []NullFloat32:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(float32Type())
- case *float32:
- if v != nil {
- return encodeValue(*v)
- }
- pt = float32Type()
- case []*float32:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(float32Type())
- case big.Rat:
- switch LossOfPrecisionHandling {
- case NumericError:
- err = validateNumeric(&v)
- if err != nil {
- return nil, nil, err
- }
- case NumericRound:
- // pass
- }
- pb.Kind = stringKind(NumericString(&v))
- pt = numericType()
- case []big.Rat:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(numericType())
- case NullNumeric:
- if v.Valid {
- return encodeValue(v.Numeric)
- }
- pt = numericType()
- case []NullNumeric:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(numericType())
- case PGNumeric:
- if v.Valid {
- pb.Kind = stringKind(v.Numeric)
- }
- return pb, pgNumericType(), nil
- case []PGNumeric:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(pgNumericType())
- case NullJSON:
- if v.Valid {
- b, err := json.Marshal(v.Value)
- if err != nil {
- return nil, nil, err
- }
- pb.Kind = stringKind(string(b))
- }
- return pb, jsonType(), nil
- case []NullJSON:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(jsonType())
- case PGJsonB:
- if v.Valid {
- b, err := json.Marshal(v.Value)
- if err != nil {
- return nil, nil, err
- }
- pb.Kind = stringKind(string(b))
- }
- return pb, pgJsonbType(), nil
- case []PGJsonB:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(pgJsonbType())
- case *big.Rat:
- switch LossOfPrecisionHandling {
- case NumericError:
- err = validateNumeric(v)
- if err != nil {
- return nil, nil, err
- }
- case NumericRound:
- // pass
- }
- if v != nil {
- pb.Kind = stringKind(NumericString(v))
- }
- pt = numericType()
- case []*big.Rat:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(numericType())
- case time.Time:
- if v == commitTimestamp {
- pb.Kind = stringKind(commitTimestampPlaceholderString)
- } else {
- pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano))
- }
- pt = timeType()
- case []time.Time:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(timeType())
- case NullTime:
- if v.Valid {
- return encodeValue(v.Time)
- }
- pt = timeType()
- case []NullTime:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(timeType())
- case *time.Time:
- if v != nil {
- return encodeValue(*v)
- }
- pt = timeType()
- case []*time.Time:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(timeType())
- case civil.Date:
- pb.Kind = stringKind(v.String())
- pt = dateType()
- case []civil.Date:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(dateType())
- case NullDate:
- if v.Valid {
- return encodeValue(v.Date)
- }
- pt = dateType()
- case []NullDate:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(dateType())
- case *civil.Date:
- if v != nil {
- return encodeValue(*v)
- }
- pt = dateType()
- case []*civil.Date:
- if v != nil {
- pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
- if err != nil {
- return nil, nil, err
- }
- }
- pt = listType(dateType())
- case GenericColumnValue:
- // Deep clone to ensure subsequent changes to v before
- // transmission don't affect our encoded value.
- pb = proto.Clone(v.Value).(*proto3.Value)
- pt = proto.Clone(v.Type).(*sppb.Type)
- case []GenericColumnValue:
- return nil, nil, errEncoderUnsupportedType(v)
- case protoreflect.Enum:
- if v != nil {
- var protoEnumfqn string
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr || !rv.IsNil() {
- pb.Kind = stringKind(strconv.FormatInt(int64(v.Number()), 10))
- protoEnumfqn = string(v.Descriptor().FullName())
- } else {
- defaultType := reflect.Zero(rv.Type().Elem()).Interface().(protoreflect.Enum)
- protoEnumfqn = string(defaultType.Descriptor().FullName())
- }
- pt = protoEnumType(protoEnumfqn)
- }
- case NullProtoEnum:
- if v.Valid {
- return encodeValue(v.ProtoEnumVal)
- }
- return nil, nil, errNotValidSrc(v)
- case proto.Message:
- if v != nil {
- if v.ProtoReflect().IsValid() {
- bytes, err := proto.Marshal(v)
- if err != nil {
- return nil, nil, err
- }
- pb.Kind = stringKind(base64.StdEncoding.EncodeToString(bytes))
- }
- protoMessagefqn := string(v.ProtoReflect().Descriptor().FullName())
- pt = protoMessageType(protoMessagefqn)
- }
- case NullProtoMessage:
- if v.Valid {
- return encodeValue(v.ProtoMessageVal)
- }
- return nil, nil, errNotValidSrc(v)
- default:
- // Check if the value is a custom type that implements spanner.Encoder
- // interface.
- if encodedVal, ok := v.(Encoder); ok {
- nv, err := encodedVal.EncodeSpanner()
- if err != nil {
- return nil, nil, err
- }
- return encodeValue(nv)
- }
-
- // Check if the value is a variant of a base type.
- decodableType := getDecodableSpannerType(v, false)
- if decodableType != spannerTypeUnknown && decodableType != spannerTypeInvalid {
- converted, err := convertCustomTypeValue(decodableType, v)
- if err != nil {
- return nil, nil, err
- }
- return encodeValue(converted)
- }
-
- if !isStructOrArrayOfStructValue(v) && !isAnArrayOfProtoColumn(v) {
- return nil, nil, errEncoderUnsupportedType(v)
- }
- typ := reflect.TypeOf(v)
-
- // Value is a Go struct value/ptr.
- if (typ.Kind() == reflect.Struct) ||
- (typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct) {
- return encodeStruct(v)
- }
-
- // Value is a slice of Go struct values/ptrs.
- if typ.Kind() == reflect.Slice {
- if isAnArrayOfProtoColumn(v) {
- return encodeProtoArray(v)
- }
- return encodeStructArray(v)
- }
- }
- return pb, pt, nil
-}
-
-func convertCustomTypeValue(sourceType decodableSpannerType, v interface{}) (interface{}, error) {
- // destination will be initialized to a base type. The input value will be
- // converted to this type and copied to destination.
- var destination reflect.Value
- switch sourceType {
- case spannerTypeInvalid:
- return nil, fmt.Errorf("cannot encode a value to type spannerTypeInvalid")
- case spannerTypeNonNullString:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf("")))
- case spannerTypeNullString:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullString{})))
- case spannerTypeByteArray:
- // Return a nil array directly if the input value is nil instead of
- // creating an empty slice and returning that.
- if reflect.ValueOf(v).IsNil() {
- return []byte(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]byte{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeNonNullInt64:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(int64(0))))
- case spannerTypeNullInt64:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullInt64{})))
- case spannerTypeNonNullBool:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(false)))
- case spannerTypeNullBool:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullBool{})))
- case spannerTypeNonNullFloat64:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(float64(0.0))))
- case spannerTypeNullFloat64:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullFloat64{})))
- case spannerTypeNonNullFloat32:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(float32(0.0))))
- case spannerTypeNullFloat32:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullFloat32{})))
- case spannerTypeNonNullTime:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(time.Time{})))
- case spannerTypeNullTime:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullTime{})))
- case spannerTypeNonNullDate:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(civil.Date{})))
- case spannerTypeNullDate:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullDate{})))
- case spannerTypeNonNullNumeric:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(big.Rat{})))
- case spannerTypeNullNumeric:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullNumeric{})))
- case spannerTypeNullJSON:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(NullJSON{})))
- case spannerTypePGJsonB:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(PGJsonB{})))
- case spannerTypePGNumeric:
- destination = reflect.Indirect(reflect.New(reflect.TypeOf(PGNumeric{})))
- case spannerTypeArrayOfNonNullString:
- if reflect.ValueOf(v).IsNil() {
- return []string(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]string{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullString:
- if reflect.ValueOf(v).IsNil() {
- return []NullString(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullString{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfByteArray:
- if reflect.ValueOf(v).IsNil() {
- return [][]byte(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([][]byte{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullInt64:
- if reflect.ValueOf(v).IsNil() {
- return []int64(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]int64{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullInt64:
- if reflect.ValueOf(v).IsNil() {
- return []NullInt64(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullInt64{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullBool:
- if reflect.ValueOf(v).IsNil() {
- return []bool(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]bool{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullBool:
- if reflect.ValueOf(v).IsNil() {
- return []NullBool(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullBool{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullFloat64:
- if reflect.ValueOf(v).IsNil() {
- return []float64(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]float64{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullFloat64:
- if reflect.ValueOf(v).IsNil() {
- return []NullFloat64(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullFloat64{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullFloat32:
- if reflect.ValueOf(v).IsNil() {
- return []float32(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]float32{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullFloat32:
- if reflect.ValueOf(v).IsNil() {
- return []NullFloat32(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullFloat32{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullTime:
- if reflect.ValueOf(v).IsNil() {
- return []time.Time(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]time.Time{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullTime:
- if reflect.ValueOf(v).IsNil() {
- return []NullTime(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullTime{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullDate:
- if reflect.ValueOf(v).IsNil() {
- return []civil.Date(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]civil.Date{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullDate:
- if reflect.ValueOf(v).IsNil() {
- return []NullDate(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullDate{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNonNullNumeric:
- if reflect.ValueOf(v).IsNil() {
- return []big.Rat(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]big.Rat{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullNumeric:
- if reflect.ValueOf(v).IsNil() {
- return []NullNumeric(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullNumeric{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfNullJSON:
- if reflect.ValueOf(v).IsNil() {
- return []NullJSON(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]NullJSON{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfPGJsonB:
- if reflect.ValueOf(v).IsNil() {
- return []PGJsonB(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]PGJsonB{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- case spannerTypeArrayOfPGNumeric:
- if reflect.ValueOf(v).IsNil() {
- return []PGNumeric(nil), nil
- }
- destination = reflect.MakeSlice(reflect.TypeOf([]PGNumeric{}), reflect.ValueOf(v).Len(), reflect.ValueOf(v).Cap())
- default:
- // This should not be possible.
- return nil, fmt.Errorf("unknown decodable type found: %v", sourceType)
- }
- // destination has been initialized. Convert and copy the input value to
- // destination. That must be done per element if the input type is a slice
- // or an array.
- if destination.Kind() == reflect.Slice || destination.Kind() == reflect.Array {
- sourceSlice := reflect.ValueOf(v)
- for i := 0; i < destination.Len(); i++ {
- source := sourceSlice.Index(i)
- destination.Index(i).Set(source.Convert(destination.Type().Elem()))
- }
- } else {
- source := reflect.ValueOf(v)
- destination.Set(source.Convert(destination.Type()))
- }
- // Return the converted value.
- return destination.Interface(), nil
-}
-
-// Encodes a Go struct value/ptr in v to the spanner Value and Type protos. v
-// itself must be non-nil.
-func encodeStruct(v interface{}) (*proto3.Value, *sppb.Type, error) {
- typ := reflect.TypeOf(v)
- val := reflect.ValueOf(v)
-
- // Pointer to struct.
- if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
- typ = typ.Elem()
- if val.IsNil() {
- // nil pointer to struct, representing a NULL STRUCT value. Use a
- // dummy value to get the type.
- _, st, err := encodeStruct(reflect.Zero(typ).Interface())
- if err != nil {
- return nil, nil, err
- }
- return nullProto(), st, nil
- }
- val = val.Elem()
- }
-
- if typ.Kind() != reflect.Struct {
- return nil, nil, errEncoderUnsupportedType(v)
- }
-
- stf := make([]*sppb.StructType_Field, 0, typ.NumField())
- stv := make([]*proto3.Value, 0, typ.NumField())
-
- for i := 0; i < typ.NumField(); i++ {
- // If the field has a 'spanner' tag, use the value of that tag as the field name.
- // This is used to build STRUCT types with unnamed/duplicate fields.
- sf := typ.Field(i)
- fval := val.Field(i)
-
- // Embedded fields are not allowed.
- if sf.Anonymous {
- return nil, nil, errUnsupportedEmbeddedStructFields(sf.Name)
- }
-
- // Unexported fields are ignored.
- if !fval.CanInterface() {
- continue
- }
-
- fname, ok := sf.Tag.Lookup("spanner")
- if !ok {
- fname = sf.Name
- }
-
- eval, etype, err := encodeValue(fval.Interface())
- if err != nil {
- return nil, nil, err
- }
-
- stf = append(stf, mkField(fname, etype))
- stv = append(stv, eval)
- }
-
- return listProto(stv...), structType(stf...), nil
-}
-
-// Encodes a slice of Go struct values/ptrs in v to the spanner Value and Type
-// protos. v itself must be non-nil.
-func encodeStructArray(v interface{}) (*proto3.Value, *sppb.Type, error) {
- etyp := reflect.TypeOf(v).Elem()
- sliceval := reflect.ValueOf(v)
-
- // Slice of pointers to structs.
- if etyp.Kind() == reflect.Ptr {
- etyp = etyp.Elem()
- }
-
- // Use a dummy struct value to get the element type.
- _, elemTyp, err := encodeStruct(reflect.Zero(etyp).Interface())
- if err != nil {
- return nil, nil, err
- }
-
- // nil slice represents a NULL array-of-struct.
- if sliceval.IsNil() {
- return nullProto(), listType(elemTyp), nil
- }
-
- values := make([]*proto3.Value, 0, sliceval.Len())
-
- for i := 0; i < sliceval.Len(); i++ {
- ev, _, err := encodeStruct(sliceval.Index(i).Interface())
- if err != nil {
- return nil, nil, err
- }
- values = append(values, ev)
- }
- return listProto(values...), listType(elemTyp), nil
-}
-
-// Encodes a slice of proto messages or enum in v to the spanner Value and Type
-// protos.
-func encodeProtoArray(v interface{}) (*proto3.Value, *sppb.Type, error) {
- pb := nullProto()
- var pt *sppb.Type
- var err error
- sliceval := reflect.ValueOf(v)
- etyp := reflect.TypeOf(v).Elem()
-
- if etyp.Implements(protoMsgReflectType) {
- if !sliceval.IsNil() {
- pb, err = encodeProtoMessageArray(sliceval.Len(), func(i int) reflect.Value { return sliceval.Index(i) })
- if err != nil {
- return nil, nil, err
- }
- }
- defaultInstance := reflect.Zero(etyp).Interface().(proto.Message)
- protoMessagefqn := string(defaultInstance.ProtoReflect().Descriptor().FullName())
- pt = listType(protoMessageType(protoMessagefqn))
- } else if etyp.Implements(protoEnumReflectType) {
- if !sliceval.IsNil() {
- pb, err = encodeProtoEnumArray(sliceval.Len(), func(i int) reflect.Value { return sliceval.Index(i) })
- if err != nil {
- return nil, nil, err
- }
- }
- if etyp.Kind() == reflect.Ptr {
- etyp = etyp.Elem()
- }
- defaultInstance := reflect.Zero(etyp).Interface().(protoreflect.Enum)
- protoEnumfqn := string(defaultInstance.Descriptor().FullName())
- pt = listType(protoEnumType(protoEnumfqn))
- }
- return pb, pt, nil
-}
-
-func isStructOrArrayOfStructValue(v interface{}) bool {
- typ := reflect.TypeOf(v)
- if typ.Kind() == reflect.Slice {
- typ = typ.Elem()
- }
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- }
- return typ.Kind() == reflect.Struct
-}
-
-func isAnArrayOfProtoColumn(v interface{}) bool {
- typ := reflect.TypeOf(v)
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- }
- if typ.Kind() == reflect.Slice {
- typ = typ.Elem()
- }
- return typ.Implements(protoMsgReflectType) || typ.Implements(protoEnumReflectType)
-}
-
-func isSupportedMutationType(v interface{}) bool {
- switch v.(type) {
- case nil, string, *string, NullString, []string, []*string, []NullString,
- []byte, [][]byte,
- int, []int, int64, *int64, []int64, []*int64, NullInt64, []NullInt64,
- bool, *bool, []bool, []*bool, NullBool, []NullBool,
- float64, *float64, []float64, []*float64, NullFloat64, []NullFloat64,
- float32, *float32, []float32, []*float32, NullFloat32, []NullFloat32,
- time.Time, *time.Time, []time.Time, []*time.Time, NullTime, []NullTime,
- civil.Date, *civil.Date, []civil.Date, []*civil.Date, NullDate, []NullDate,
- big.Rat, *big.Rat, []big.Rat, []*big.Rat, NullNumeric, []NullNumeric,
- GenericColumnValue, proto.Message, protoreflect.Enum, NullProtoMessage, NullProtoEnum:
- return true
- default:
- // Check if the custom type implements spanner.Encoder interface.
- if _, ok := v.(Encoder); ok {
- return true
- }
-
- if isAnArrayOfProtoColumn(v) {
- return true
- }
-
- decodableType := getDecodableSpannerType(v, false)
- return decodableType != spannerTypeUnknown && decodableType != spannerTypeInvalid
- }
-}
-
-// encodeValueArray encodes a Value array into a proto3.ListValue.
-func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) {
- lv := &proto3.ListValue{}
- lv.Values = make([]*proto3.Value, 0, len(vs))
- for _, v := range vs {
- if !isSupportedMutationType(v) {
- return nil, errEncoderUnsupportedType(v)
- }
- pb, _, err := encodeValue(v)
- if err != nil {
- return nil, err
- }
- lv.Values = append(lv.Values, pb)
- }
- return lv, nil
-}
-
-// encodeArray assumes that all values of the array element type encode without
-// error.
-func encodeArray(len int, at func(int) interface{}) (*proto3.Value, error) {
- vs := make([]*proto3.Value, len)
- var err error
- for i := 0; i < len; i++ {
- vs[i], _, err = encodeValue(at(i))
- if err != nil {
- return nil, err
- }
- }
- return listProto(vs...), nil
-}
-
-func encodeProtoMessageArray(len int, at func(int) reflect.Value) (*proto3.Value, error) {
- vs := make([]*proto3.Value, len)
- var err error
- for i := 0; i < len; i++ {
- v := at(i).Interface().(proto.Message)
- vs[i], _, err = encodeValue(v)
- if err != nil {
- return nil, err
- }
- }
- return listProto(vs...), nil
-}
-
-func encodeProtoEnumArray(len int, at func(int) reflect.Value) (*proto3.Value, error) {
- vs := make([]*proto3.Value, len)
- var err error
- for i := 0; i < len; i++ {
- v := at(i).Interface().(protoreflect.Enum)
- vs[i], _, err = encodeValue(v)
- if err != nil {
- return nil, err
- }
- }
- return listProto(vs...), nil
-}
-
-func spannerTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
- if s := t.Get("spanner"); s != "" {
- if s == "-" {
- return "", false, nil, nil
- }
- return s, true, nil, nil
- }
- return "", true, nil, nil
-}
-
-var fieldCache = fields.NewCache(spannerTagParser, nil, nil)
-
-func trimDoubleQuotes(payload []byte) ([]byte, error) {
- if len(payload) <= 1 || payload[0] != '"' || payload[len(payload)-1] != '"' {
- return nil, fmt.Errorf("payload is too short or not wrapped with double quotes: got %q", string(payload))
- }
- // Remove the double quotes at the beginning and the end.
- return payload[1 : len(payload)-1], nil
-}